language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/dynamic/support/ResolvableType.java
|
{
"start": 48262,
"end": 49679
}
|
class ____ implements ParameterizedType, Serializable {
private final Type rawType;
private final Type[] typeArguments;
public SyntheticParameterizedType(Type rawType, Type[] typeArguments) {
this.rawType = rawType;
this.typeArguments = typeArguments;
}
@Override
public Type getOwnerType() {
return null;
}
@Override
public Type getRawType() {
return this.rawType;
}
@Override
public Type[] getActualTypeArguments() {
return this.typeArguments;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof ParameterizedType)) {
return false;
}
ParameterizedType otherType = (ParameterizedType) other;
return (otherType.getOwnerType() == null && this.rawType.equals(otherType.getRawType())
&& Arrays.equals(this.typeArguments, otherType.getActualTypeArguments()));
}
@Override
public int hashCode() {
return (this.rawType.hashCode() * 31 + Arrays.hashCode(this.typeArguments));
}
}
/**
* Internal helper to handle bounds from {@link WildcardType}s.
*/
private static
|
SyntheticParameterizedType
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/suppress/CustomSuppressionTest.java
|
{
"start": 3771,
"end": 4448
}
|
class ____ {
@SuppressBothCheckers
Comparable<Integer> myComparable =
new Comparable<Integer>() {
@Override
public int compareTo(Integer other) {
return -1;
}
};
}
""")
.doTest();
}
@Test
public void myCheckerIsNotSuppressedWithWrongCustomAnnotation() {
CompilationTestHelper.newInstance(MyChecker.class, getClass())
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.suppress.CustomSuppressionTest.SuppressMyChecker2;
|
Test
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-5753-custom-mojo-execution-configurator/plugin/src/main/java/org/apache/maven/its/mng5753/plugin/TestMojo.java
|
{
"start": 2107,
"end": 2795
}
|
class ____ extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
private MavenProject project;
@Parameter
private String name;
public void execute() throws MojoExecutionException {
try {
File file = new File(project.getBasedir(), "configuration.txt");
file.getParentFile().mkdirs();
Writer w = new OutputStreamWriter(new FileOutputStream(file, true), "UTF-8");
try {
w.write(name);
} finally {
w.close();
}
} catch (IOException e) {
throw new MojoExecutionException(e.getMessage(), e);
}
}
}
|
TestMojo
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
|
{
"start": 1733,
"end": 2616
}
|
class ____ {
@Public
@Unstable
public static SchedulingRequest newInstance(long allocationRequestId,
Priority priority, ExecutionTypeRequest executionType,
Set<String> allocationTags, ResourceSizing resourceSizing,
PlacementConstraint placementConstraintExpression) {
return SchedulingRequest.newBuilder()
.allocationRequestId(allocationRequestId).priority(priority)
.executionType(executionType).allocationTags(allocationTags)
.resourceSizing(resourceSizing)
.placementConstraintExpression(placementConstraintExpression).build();
}
@Public
@Unstable
public static SchedulingRequestBuilder newBuilder() {
return new SchedulingRequestBuilder();
}
/**
* Class to construct instances of {@link SchedulingRequest} with specific
* options.
*/
@Public
@Unstable
public static final
|
SchedulingRequest
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/utils/TestAllowClassNotifyListener.java
|
{
"start": 1024,
"end": 2740
}
|
class ____ implements AllowClassNotifyListener {
private static final AtomicReference<SerializeCheckStatus> status = new AtomicReference<>();
private static final AtomicReference<Set<String>> allowedList = new AtomicReference<>();
private static final AtomicReference<Set<String>> disAllowedList = new AtomicReference<>();
private static final AtomicBoolean checkSerializable = new AtomicBoolean();
private static final AtomicInteger count = new AtomicInteger(0);
@Override
public void notifyPrefix(Set<String> allowedList, Set<String> disAllowedList) {
TestAllowClassNotifyListener.allowedList.set(allowedList);
TestAllowClassNotifyListener.disAllowedList.set(disAllowedList);
count.incrementAndGet();
}
@Override
public void notifyCheckStatus(SerializeCheckStatus status) {
TestAllowClassNotifyListener.status.set(status);
count.incrementAndGet();
}
@Override
public void notifyCheckSerializable(boolean checkSerializable) {
TestAllowClassNotifyListener.checkSerializable.set(checkSerializable);
count.incrementAndGet();
}
public static SerializeCheckStatus getStatus() {
return status.get();
}
public static Set<String> getAllowedList() {
return allowedList.get();
}
public static Set<String> getDisAllowedList() {
return disAllowedList.get();
}
public static boolean isCheckSerializable() {
return checkSerializable.get();
}
public static int getCount() {
return count.get();
}
public static void setCount(int count) {
TestAllowClassNotifyListener.count.set(count);
}
}
|
TestAllowClassNotifyListener
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/DirectExecutorService.java
|
{
"start": 1357,
"end": 1432
}
|
class ____ implements ExecutorService {
private static
|
DirectExecutorService
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java
|
{
"start": 3760,
"end": 27259
}
|
class ____ extends AggregatorTestCase {
private static final String LONG_FIELD = "numeric";
private static final String KEYWORD_FIELD = "keyword";
private static final List<Long> dataset;
static {
List<Long> d = new ArrayList<>(45);
for (long i = 0; i < 10; i++) {
for (int j = 0; j < i; j++) {
d.add(i);
}
}
dataset = d;
}
public void testMatchNoDocs() throws IOException {
testSearchCase(
new MatchNoDocsQuery(),
dataset,
aggregation -> aggregation.field(KEYWORD_FIELD).maxDocCount(1),
agg -> assertEquals(0, agg.getBuckets().size())
);
testSearchCase(
new MatchNoDocsQuery(),
dataset,
aggregation -> aggregation.field(LONG_FIELD).maxDocCount(1),
agg -> assertEquals(0, agg.getBuckets().size())
);
}
public void testMatchAllDocs() throws IOException {
Query query = new MatchAllDocsQuery();
testSearchCase(query, dataset, aggregation -> aggregation.field(LONG_FIELD).maxDocCount(1), agg -> {
assertEquals(1, agg.getBuckets().size());
LongRareTerms.Bucket bucket = (LongRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo(1L));
assertThat(bucket.getDocCount(), equalTo(1L));
});
testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD).maxDocCount(1), agg -> {
assertEquals(1, agg.getBuckets().size());
StringRareTerms.Bucket bucket = (StringRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKeyAsString(), equalTo("1"));
assertThat(bucket.getDocCount(), equalTo(1L));
});
}
public void testManyDocsOneRare() throws IOException {
Query query = new MatchAllDocsQuery();
List<Long> d = new ArrayList<>(500);
for (int i = 1; i < 500; i++) {
d.add((long) i);
d.add((long) i);
}
// The one rare term
d.add(0L);
testSearchCase(query, d, aggregation -> aggregation.field(LONG_FIELD).maxDocCount(1), agg -> {
assertEquals(1, agg.getBuckets().size());
LongRareTerms.Bucket bucket = (LongRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo(0L));
assertThat(bucket.getDocCount(), equalTo(1L));
});
testSearchCase(query, d, aggregation -> aggregation.field(KEYWORD_FIELD).maxDocCount(1), agg -> {
assertEquals(1, agg.getBuckets().size());
StringRareTerms.Bucket bucket = (StringRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKeyAsString(), equalTo("0"));
assertThat(bucket.getDocCount(), equalTo(1L));
});
}
public void testIncludeExclude() throws IOException {
Query query = new MatchAllDocsQuery();
testSearchCase(
query,
dataset,
aggregation -> aggregation.field(LONG_FIELD)
.maxDocCount(2) // bump to 2 since we're only including "2"
.includeExclude(new IncludeExclude(null, null, new TreeSet<>(Set.of(new BytesRef("2"))), new TreeSet<>())),
agg -> {
assertEquals(1, agg.getBuckets().size());
LongRareTerms.Bucket bucket = (LongRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo(2L));
assertThat(bucket.getDocCount(), equalTo(2L));
}
);
}
public void testEmbeddedMaxAgg() throws IOException {
Query query = new MatchAllDocsQuery();
testSearchCase(query, dataset, aggregation -> {
MaxAggregationBuilder max = new MaxAggregationBuilder("the_max").field(LONG_FIELD);
aggregation.field(LONG_FIELD).maxDocCount(1).subAggregation(max);
}, agg -> {
assertEquals(1, agg.getBuckets().size());
LongRareTerms.Bucket bucket = (LongRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo(1L));
assertThat(bucket.getDocCount(), equalTo(1L));
InternalAggregations children = bucket.getAggregations();
assertThat(children.asList().size(), equalTo(1));
assertThat(children.asList().get(0).getName(), equalTo("the_max"));
assertThat(((Max) (children.asList().get(0))).value(), equalTo(1.0));
});
testSearchCase(query, dataset, aggregation -> {
MaxAggregationBuilder max = new MaxAggregationBuilder("the_max").field(LONG_FIELD);
aggregation.field(KEYWORD_FIELD).maxDocCount(1).subAggregation(max);
}, agg -> {
assertEquals(1, agg.getBuckets().size());
StringRareTerms.Bucket bucket = (StringRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo("1"));
assertThat(bucket.getDocCount(), equalTo(1L));
InternalAggregations children = bucket.getAggregations();
assertThat(children.asList().size(), equalTo(1));
assertThat(children.asList().get(0).getName(), equalTo("the_max"));
assertThat(((Max) (children.asList().get(0))).value(), equalTo(1.0));
});
}
public void testEmpty() throws IOException {
Query query = new MatchAllDocsQuery();
testSearchCase(
query,
Collections.emptyList(),
aggregation -> aggregation.field(LONG_FIELD).maxDocCount(1),
agg -> assertEquals(0, agg.getBuckets().size())
);
testSearchCase(
query,
Collections.emptyList(),
aggregation -> aggregation.field(KEYWORD_FIELD).maxDocCount(1),
agg -> assertEquals(0, agg.getBuckets().size())
);
testSearchCase(
query,
Collections.emptyList(),
aggregation -> aggregation.field(LONG_FIELD).maxDocCount(1),
agg -> assertEquals(0, agg.getBuckets().size())
);
testSearchCase(
query,
Collections.emptyList(),
aggregation -> aggregation.field(KEYWORD_FIELD).maxDocCount(1),
agg -> assertEquals(0, agg.getBuckets().size())
);
}
public void testUnmapped() throws Exception {
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
Document document = new Document();
document.add(new SortedDocValuesField("string", new BytesRef("a")));
document.add(new NumericDocValuesField("long", 0L));
indexWriter.addDocument(document);
MappedFieldType fieldType1 = new KeywordFieldMapper.KeywordFieldType("another_string");
MappedFieldType fieldType2 = new NumberFieldMapper.NumberFieldType("another_long", NumberFieldMapper.NumberType.LONG);
try (DirectoryReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
String[] fieldNames = new String[] { "string", "long" };
for (int i = 0; i < fieldNames.length; i++) {
RareTermsAggregationBuilder aggregationBuilder = new RareTermsAggregationBuilder("_name").field(fieldNames[i]);
RareTerms result = searchAndReduce(indexReader, new AggTestConfig(aggregationBuilder, fieldType1, fieldType2));
assertEquals("_name", result.getName());
assertEquals(0, result.getBuckets().size());
}
}
}
}
}
public void testRangeField() throws Exception {
RangeType rangeType = RangeType.DOUBLE;
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5
new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0
new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10
new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45
}) {
Document doc = new Document();
BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
doc.add(new BinaryDocValuesField("field", encodedRange));
indexWriter.addDocument(doc);
}
MappedFieldType fieldType = new RangeFieldMapper.RangeFieldType("field", rangeType);
try (DirectoryReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
RareTermsAggregationBuilder aggregationBuilder = new RareTermsAggregationBuilder("_name").field("field");
expectThrows(
IllegalArgumentException.class,
() -> searchAndReduce(indexReader, new AggTestConfig(aggregationBuilder, fieldType))
);
}
}
}
}
public void testNestedTerms() throws IOException {
Query query = new MatchAllDocsQuery();
testSearchCase(query, dataset, aggregation -> {
TermsAggregationBuilder terms = new TermsAggregationBuilder("the_terms").field(KEYWORD_FIELD);
aggregation.field(LONG_FIELD).maxDocCount(1).subAggregation(terms);
}, agg -> {
assertEquals(1, agg.getBuckets().size());
LongRareTerms.Bucket bucket = (LongRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo(1L));
assertThat(bucket.getDocCount(), equalTo(1L));
InternalAggregations children = bucket.getAggregations();
assertThat(children.asList().size(), equalTo(1));
assertThat(children.asList().get(0).getName(), equalTo("the_terms"));
assertThat(((Terms) (children.asList().get(0))).getBuckets().size(), equalTo(1));
assertThat(((Terms) (children.asList().get(0))).getBuckets().get(0).getKeyAsString(), equalTo("1"));
});
testSearchCase(query, dataset, aggregation -> {
TermsAggregationBuilder terms = new TermsAggregationBuilder("the_terms").field(KEYWORD_FIELD);
aggregation.field(KEYWORD_FIELD).maxDocCount(1).subAggregation(terms);
}, agg -> {
assertEquals(1, agg.getBuckets().size());
StringRareTerms.Bucket bucket = (StringRareTerms.Bucket) agg.getBuckets().get(0);
assertThat(bucket.getKey(), equalTo("1"));
assertThat(bucket.getDocCount(), equalTo(1L));
InternalAggregations children = bucket.getAggregations();
assertThat(children.asList().size(), equalTo(1));
assertThat(children.asList().get(0).getName(), equalTo("the_terms"));
assertThat(((Terms) (children.asList().get(0))).getBuckets().size(), equalTo(1));
assertThat(((Terms) (children.asList().get(0))).getBuckets().get(0).getKeyAsString(), equalTo("1"));
});
}
public void testInsideTerms() throws IOException {
for (String field : new String[] { KEYWORD_FIELD, LONG_FIELD }) {
AggregationBuilder builder = new TermsAggregationBuilder("terms").field("even_odd")
.subAggregation(new RareTermsAggregationBuilder("rare").field(field).maxDocCount(2));
StringTerms terms = executeTestCase(new MatchAllDocsQuery(), dataset, builder);
StringTerms.Bucket even = terms.getBucketByKey("even");
InternalRareTerms<?, ?> evenRare = even.getAggregations().get("rare");
assertEquals(evenRare.getBuckets().stream().map(InternalRareTerms.Bucket::getKeyAsString).collect(toList()), List.of("2"));
assertEquals(evenRare.getBuckets().stream().map(InternalRareTerms.Bucket::getDocCount).collect(toList()), List.of(2L));
StringTerms.Bucket odd = terms.getBucketByKey("odd");
InternalRareTerms<?, ?> oddRare = odd.getAggregations().get("rare");
assertEquals(oddRare.getBuckets().stream().map(InternalRareTerms.Bucket::getKeyAsString).collect(toList()), List.of("1"));
assertEquals(oddRare.getBuckets().stream().map(InternalRareTerms.Bucket::getDocCount).collect(toList()), List.of(1L));
}
}
public void testWithNestedAggregations() throws IOException {
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
for (int i = 0; i < 10; i++) {
int[] nestedValues = new int[i];
for (int j = 0; j < i; j++) {
nestedValues[j] = j;
}
indexWriter.addDocuments(generateDocsWithNested(Integer.toString(i), i, nestedValues));
}
indexWriter.commit();
NestedAggregationBuilder nested = new NestedAggregationBuilder("nested", "nested_object").subAggregation(
new RareTermsAggregationBuilder("terms").field("nested_value").maxDocCount(1)
);
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("nested_value", NumberFieldMapper.NumberType.LONG);
try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
AggTestConfig aggTestConfig = new AggTestConfig(nested, fieldType).withQuery(new FieldExistsQuery(PRIMARY_TERM_NAME));
// match root document only
InternalNested result = searchAndReduce(indexReader, aggTestConfig);
InternalMultiBucketAggregation<?, ?> terms = result.getAggregations().get("terms");
assertThat(terms.getBuckets().size(), equalTo(1));
assertThat(terms.getBuckets().get(0).getKeyAsString(), equalTo("8"));
}
}
}
}
public void testWithNestedScoringAggregations() throws IOException {
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
for (int i = 0; i < 10; i++) {
int[] nestedValues = new int[i];
for (int j = 0; j < i; j++) {
nestedValues[j] = j;
}
indexWriter.addDocuments(generateDocsWithNested(Integer.toString(i), i, nestedValues));
}
indexWriter.commit();
for (boolean withScore : new boolean[] { true, false }) {
NestedAggregationBuilder nested = new NestedAggregationBuilder("nested", "nested_object").subAggregation(
new RareTermsAggregationBuilder("terms").field("nested_value")
.maxDocCount(2)
.subAggregation(
new TopHitsAggregationBuilder("top_hits").sort(
withScore ? new ScoreSortBuilder() : new FieldSortBuilder("_doc")
).storedField("_none_")
)
);
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType("nested_value", NumberFieldMapper.NumberType.LONG);
try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
if (withScore) {
IllegalStateException e = expectThrows(IllegalStateException.class, () -> {
// match root document only
AggTestConfig aggTestConfig = new AggTestConfig(nested, fieldType).withQuery(
new FieldExistsQuery(PRIMARY_TERM_NAME)
);
searchAndReduce(indexReader, aggTestConfig);
});
assertThat(
e.getMessage(),
equalTo(
"RareTerms agg [terms] is the child of the nested agg [nested], and also has a scoring "
+ "child agg [top_hits]. This combination is not supported because it requires "
+ "executing in [depth_first] mode, which the RareTerms agg cannot do."
)
);
} else {
AggTestConfig aggTestConfig = new AggTestConfig(nested, fieldType).withQuery(
new FieldExistsQuery(PRIMARY_TERM_NAME)
);
// match root document only
InternalNested result = searchAndReduce(indexReader, aggTestConfig);
InternalMultiBucketAggregation<?, ?> terms = result.getAggregations().get("terms");
assertThat(terms.getBuckets().size(), equalTo(2));
long counter = 1;
for (MultiBucketsAggregation.Bucket bucket : terms.getBuckets()) {
InternalTopHits topHits = bucket.getAggregations().get("top_hits");
TotalHits hits = topHits.getHits().getTotalHits();
assertNotNull(hits);
assertThat(hits.value(), equalTo(counter));
assertThat(topHits.getHits().getMaxScore(), equalTo(Float.NaN));
counter += 1;
}
}
}
}
}
}
}
private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(
SeqNoFieldMapper.SeqNoIndexOptions.POINTS_AND_DOC_VALUES
);
private List<Iterable<IndexableField>> generateDocsWithNested(String id, int value, int[] nestedValues) {
List<Iterable<IndexableField>> documents = new ArrayList<>();
for (int nestedValue : nestedValues) {
Document document = new Document();
document.add(new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.NO));
document.add(new StringField(NestedPathFieldMapper.NAME, "nested_object", Field.Store.NO));
document.add(new SortedNumericDocValuesField("nested_value", nestedValue));
documents.add(document);
}
LuceneDocument document = new LuceneDocument();
document.add(new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES));
document.add(new StringField(NestedPathFieldMapper.NAME, "docs", Field.Store.NO));
document.add(new SortedNumericDocValuesField("value", value));
sequenceIDFields.addFields(document);
documents.add(document);
return documents;
}
@Override
protected IndexSettings createIndexSettings() {
Settings nodeSettings = Settings.builder().put("search.max_buckets", 100000).build();
return new IndexSettings(
IndexMetadata.builder("_index")
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(System.currentTimeMillis())
.build(),
nodeSettings
);
}
private void testSearchCase(
Query query,
List<Long> dataset,
Consumer<RareTermsAggregationBuilder> configure,
Consumer<InternalMappedRareTerms<?, ?>> verify
) throws IOException {
RareTermsAggregationBuilder aggregationBuilder = new RareTermsAggregationBuilder("_name");
if (configure != null) {
configure.accept(aggregationBuilder);
}
verify.accept(executeTestCase(query, dataset, aggregationBuilder));
}
private <A extends InternalAggregation> A executeTestCase(Query query, List<Long> dataset, AggregationBuilder aggregationBuilder)
throws IOException {
try (Directory directory = newDirectory()) {
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
Document document = new Document();
List<Long> shuffledDataset = new ArrayList<>(dataset);
Collections.shuffle(shuffledDataset, random());
for (Long value : shuffledDataset) {
document.add(new SortedNumericDocValuesField(LONG_FIELD, value));
document.add(new LongPoint(LONG_FIELD, value));
document.add(new Field(KEYWORD_FIELD, new BytesRef(Long.toString(value)), KeywordFieldMapper.Defaults.FIELD_TYPE));
document.add(
new Field("even_odd", new BytesRef(value % 2 == 0 ? "even" : "odd"), KeywordFieldMapper.Defaults.FIELD_TYPE)
);
indexWriter.addDocument(document);
document.clear();
}
}
try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
MappedFieldType[] types = new MappedFieldType[] {
keywordField(KEYWORD_FIELD),
longField(LONG_FIELD),
keywordField("even_odd") };
return searchAndReduce(indexReader, new AggTestConfig(aggregationBuilder, types).withQuery(query));
}
}
}
@Override
public void doAssertReducedMultiBucketConsumer(Aggregation agg, MultiBucketConsumerService.MultiBucketConsumer bucketConsumer) {
/*
* No-op.
*
* This is used in the aggregator tests to check that after a reduction, we have the correct number of buckets.
* This can be done during incremental reduces, and the final reduce. Unfortunately, the number of buckets
* can _decrease_ during runtime as values are reduced together (e.g. 1 count on each shard, but when
* reduced it becomes 2 and is greater than the threshold).
*
* Because the incremental reduction test picks random subsets to reduce together, it's impossible
* to predict how the buckets will end up, and so this assertion will fail.
*
* If we want to put this assertion back in, we'll need this test to override the incremental reduce
* portion so that we can deterministically know which shards are being reduced together and which
* buckets we should have left after each reduction.
*/
}
@Override
protected List<ObjectMapper> objectMappers() {
return List.of(NestedAggregatorTests.nestedObject("nested_object"));
}
}
|
RareTermsAggregatorTests
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KubernetesPersistentVolumesClaimsComponentBuilderFactory.java
|
{
"start": 2204,
"end": 5007
}
|
interface ____ extends ComponentBuilder<KubernetesPersistentVolumesClaimsComponent> {
/**
* To use an existing kubernetes client.
*
* The option is a:
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: producer
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesPersistentVolumesClaimsComponentBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default KubernetesPersistentVolumesClaimsComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default KubernetesPersistentVolumesClaimsComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
|
KubernetesPersistentVolumesClaimsComponentBuilder
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java
|
{
"start": 3971,
"end": 13926
}
|
class ____ {
private static final Logger log = LoggerFactory.getLogger(ErrorHandlingIntegrationTest.class);
private static final String DLQ_TOPIC = "my-connector-errors";
private static final String CONNECTOR_NAME = "error-conn";
private static final String TASK_ID = "error-conn-0";
private static final int NUM_RECORDS_PRODUCED = 1000;
private static final int NUM_TASKS = 1;
private static final long CONNECTOR_SETUP_DURATION_MS = TimeUnit.SECONDS.toMillis(60);
private static final long CONSUME_MAX_DURATION_MS = TimeUnit.SECONDS.toMillis(30);
private EmbeddedConnectCluster connect;
private ConnectorHandle connectorHandle;
@BeforeEach
public void setup() throws InterruptedException {
// setup Connect cluster with defaults
connect = new EmbeddedConnectCluster.Builder().build();
// start Connect cluster
connect.start();
// get connector handles before starting test.
connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME);
}
@AfterEach
public void close() {
RuntimeHandles.get().deleteConnector(CONNECTOR_NAME);
connect.stop();
}
@Test
public void testSkipRetryAndDLQWithHeaders() throws Exception {
// create test topic
connect.kafka().createTopic("test-topic");
// setup connector config
Map<String, String> props = new HashMap<>();
props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName());
props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS));
props.put(TOPICS_CONFIG, "test-topic");
props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
props.put(TRANSFORMS_CONFIG, "failing_transform");
props.put("transforms.failing_transform.type", FaultyPassthrough.class.getName());
// log all errors, along with message metadata
props.put(ERRORS_LOG_ENABLE_CONFIG, "true");
props.put(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, "true");
// produce bad messages into dead letter queue
props.put(DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC);
props.put(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, "true");
props.put(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
// tolerate all errors
props.put(ERRORS_TOLERANCE_CONFIG, "all");
// retry for up to one second
props.put(ERRORS_RETRY_TIMEOUT_CONFIG, "1000");
// set expected records to successfully reach the task
connectorHandle.taskHandle(TASK_ID).expectedRecords(NUM_RECORDS_PRODUCED - FaultyPassthrough.EXPECTED_INCORRECT_RECORDS);
connect.configureConnector(CONNECTOR_NAME, props);
connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS,
"Connector tasks did not start in time.");
waitForCondition(this::checkForPartitionAssignment,
CONNECTOR_SETUP_DURATION_MS,
"Connector task was not assigned a partition.");
// produce some strings into test topic
for (int i = 0; i < NUM_RECORDS_PRODUCED; i++) {
connect.kafka().produce("test-topic", "key-" + i, "value-" + i);
}
// consume all records from test topic
log.info("Consuming records from test topic");
int i = 0;
for (ConsumerRecord<byte[], byte[]> rec : connect.kafka().consume(NUM_RECORDS_PRODUCED, CONSUME_MAX_DURATION_MS, "test-topic")) {
String k = new String(rec.key());
String v = new String(rec.value());
log.debug("Consumed record (key='{}', value='{}') from topic {}", k, v, rec.topic());
assertEquals(k, "key-" + i, "Unexpected key");
assertEquals(v, "value-" + i, "Unexpected value");
i++;
}
// wait for records to reach the task
connectorHandle.taskHandle(TASK_ID).awaitRecords(CONSUME_MAX_DURATION_MS);
// consume failed records from dead letter queue topic
log.info("Consuming records from test topic");
ConsumerRecords<byte[], byte[]> messages = connect.kafka().consume(FaultyPassthrough.EXPECTED_INCORRECT_RECORDS, CONSUME_MAX_DURATION_MS, DLQ_TOPIC);
for (ConsumerRecord<byte[], byte[]> recs : messages) {
log.debug("Consumed record (key={}, value={}) from dead letter queue topic {}",
new String(recs.key()), new String(recs.value()), DLQ_TOPIC);
assertTrue(recs.headers().toArray().length > 0);
assertValue("test-topic", recs.headers(), ERROR_HEADER_ORIG_TOPIC);
assertValue(RetriableException.class.getName(), recs.headers(), ERROR_HEADER_EXCEPTION);
assertValue("Error when value='value-7'", recs.headers(), ERROR_HEADER_EXCEPTION_MESSAGE);
}
connect.deleteConnector(CONNECTOR_NAME);
connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME,
"Connector wasn't deleted in time.");
}
/**
 * Verifies that records failed by the sink task via the errant-record reporter are routed to
 * the dead letter queue, while every produced record still reaches the task.
 *
 * <p>Fixes applied: JUnit 5 {@code assertEquals(expected, actual, message)} arguments were
 * swapped, and the DLQ-consumption log line claimed to read from the test topic.
 */
@Test
public void testErrantRecordReporter() throws Exception {
    // create test topic
    connect.kafka().createTopic("test-topic");

    // setup connector config
    Map<String, String> props = new HashMap<>();
    props.put(CONNECTOR_CLASS_CONFIG, ErrantRecordSinkConnector.class.getSimpleName());
    props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS));
    props.put(TOPICS_CONFIG, "test-topic");
    // Restrict the size of each poll so that the records are delivered across multiple polls
    props.put(CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "5");
    props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
    props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());

    // log all errors, along with message metadata
    props.put(ERRORS_LOG_ENABLE_CONFIG, "true");
    props.put(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, "true");

    // produce bad messages into dead letter queue
    props.put(DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC);
    props.put(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, "true");
    props.put(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, "1");

    // tolerate all errors
    props.put(ERRORS_TOLERANCE_CONFIG, "all");

    // retry for up to one second
    props.put(ERRORS_RETRY_TIMEOUT_CONFIG, "1000");

    // set expected records to successfully reach the task
    connectorHandle.taskHandle(TASK_ID).expectedRecords(NUM_RECORDS_PRODUCED);

    connect.configureConnector(CONNECTOR_NAME, props);
    connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS,
            "Connector tasks did not start in time.");
    waitForCondition(this::checkForPartitionAssignment,
            CONNECTOR_SETUP_DURATION_MS,
            "Connector task was not assigned a partition.");

    // produce some strings into test topic
    for (int i = 0; i < NUM_RECORDS_PRODUCED; i++) {
        connect.kafka().produce("test-topic", "key-" + i, "value-" + i);
    }

    // consume all records from test topic and verify they arrive in production order
    log.info("Consuming records from test topic");
    int i = 0;
    for (ConsumerRecord<byte[], byte[]> rec : connect.kafka().consume(NUM_RECORDS_PRODUCED, CONSUME_MAX_DURATION_MS, "test-topic")) {
        String k = new String(rec.key());
        String v = new String(rec.value());
        log.debug("Consumed record (key='{}', value='{}') from topic {}", k, v, rec.topic());
        // JUnit 5 takes (expected, actual, message); the previous argument order was
        // swapped, which produced misleading failure messages.
        assertEquals("key-" + i, k, "Unexpected key");
        assertEquals("value-" + i, v, "Unexpected value");
        i++;
    }

    // wait for records to reach the task
    connectorHandle.taskHandle(TASK_ID).awaitRecords(CONSUME_MAX_DURATION_MS);

    // consume failed records from dead letter queue topic
    log.info("Consuming records from dead letter queue topic");
    Set<String> keys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> rec : connect.kafka().consume(NUM_RECORDS_PRODUCED, CONSUME_MAX_DURATION_MS, DLQ_TOPIC)) {
        String k = new String(rec.key());
        keys.add(k);
    }
    // Every produced record is expected to be reported as errant exactly once.
    assertEquals(NUM_RECORDS_PRODUCED, keys.size());

    connect.deleteConnector(CONNECTOR_NAME);
    connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME,
            "Connector wasn't deleted in time.");
}
/**
* Check if a partition was assigned to each task. This method swallows exceptions since it is invoked from a
* {@link org.apache.kafka.test.TestUtils#waitForCondition} that will throw an error if this method continued
* to return false after the specified duration has elapsed.
*
* @return true if each task was assigned a partition each, false if this was not true or an error occurred when
* executing this operation.
*/
private boolean checkForPartitionAssignment() {
    try {
        ConnectorStateInfo stateInfo = connect.connectorStatus(CONNECTOR_NAME);
        // All tasks must be reported, and the tracked task must own exactly one partition.
        if (stateInfo == null || stateInfo.tasks().size() != NUM_TASKS) {
            return false;
        }
        return connectorHandle.taskHandle(TASK_ID).numPartitionsAssigned() == 1;
    } catch (Exception e) {
        // Report "not assigned yet"; the surrounding waitForCondition will retry.
        log.error("Could not check connector state info.", e);
        return false;
    }
}
// Asserts that the last header with the given key carries the expected string value.
// A null expectation matches only a null header value.
private void assertValue(String expected, Headers headers, String headerKey) {
    byte[] actualBytes = headers.lastHeader(headerKey).value();
    if (expected == null) {
        if (actualBytes != null) {
            fail();
        }
        return;
    }
    if (actualBytes == null) {
        fail();
    }
    assertEquals(expected, new String(actualBytes));
}
public static
|
ErrorHandlingIntegrationTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/internal/SecondaryTableFromAnnotationSecondPass.java
|
{
"start": 296,
"end": 839
}
|
/**
 * Second pass that finalizes the binding of annotation-declared secondary tables
 * by delegating to the owning {@link EntityBinder}.
 */
class ____ implements SecondPass {
    // Binder for the owning entity; performs the actual secondary-table binding.
    private final EntityBinder entityBinder;
    // Context (property holder) captured at construction and replayed during the second pass.
    private final PropertyHolder propertyHolder;

    public SecondaryTableFromAnnotationSecondPass(
            EntityBinder entityBinder,
            PropertyHolder propertyHolder) {
        this.entityBinder = entityBinder;
        this.propertyHolder = propertyHolder;
    }

    @Override
    public void doSecondPass(Map<String, PersistentClass> persistentClasses) throws MappingException {
        // The persistentClasses map is not consulted; all needed state was captured at construction.
        entityBinder.finalSecondaryTableFromAnnotationBinding( propertyHolder );
    }
}
|
SecondaryTableFromAnnotationSecondPass
|
java
|
spring-projects__spring-boot
|
module/spring-boot-cloudfoundry/src/main/java/org/springframework/boot/cloudfoundry/autoconfigure/actuate/endpoint/CloudFoundryWebEndpointDiscoverer.java
|
{
"start": 4227,
"end": 4543
}
|
/**
 * {@link RuntimeHintsRegistrar} registering the reflection hints needed for
 * {@code CloudFoundryEndpointFilter} under AOT processing.
 */
class ____ implements RuntimeHintsRegistrar {

    @Override
    public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
        // Allow CloudFoundryEndpointFilter to be instantiated reflectively (declared constructors).
        hints.reflection()
            .registerType(CloudFoundryEndpointFilter.class, MemberCategory.INVOKE_DECLARED_CONSTRUCTORS);
    }

}
}
|
CloudFoundryWebEndpointDiscovererRuntimeHints
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/onetoone/bidirectional/BidirectionalOneToOneWithIdClassesTest.java
|
{
"start": 1869,
"end": 1994
}
|
class ____ {
    // Simple (single-column) identifier of the Operator entity.
    @Id
    private String operatorId;
}
@Entity( name = "Price" )
@IdClass( PricePK.class )
public static
|
Operator
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/testFixtures/java/org/springframework/boot/web/server/servlet/MockServletWebServer.java
|
{
"start": 1663,
"end": 5829
}
|
class ____ implements WebServer {
private ServletContext servletContext;
private final Initializer[] initializers;
private final List<RegisteredServlet> registeredServlets = new ArrayList<>();
private final List<RegisteredFilter> registeredFilters = new ArrayList<>();
private final Map<String, FilterRegistration> filterRegistrations = new HashMap<>();
private final Map<String, ServletRegistration> servletRegistrations = new HashMap<>();
private final int port;
MockServletWebServer(ServletContextInitializers initializers, int port) {
this(StreamSupport.stream(initializers.spliterator(), false)
.map((initializer) -> (Initializer) initializer::onStartup)
.toArray(Initializer[]::new), port);
}
MockServletWebServer(Initializer[] initializers, int port) {
this.initializers = initializers;
this.port = port;
initialize();
}
@SuppressWarnings("unchecked")
// Wires up a Mockito-backed ServletContext that records servlet/filter registrations
// and init parameters, then replays the configured initializers against it.
private void initialize() {
    try {
        this.servletContext = mock(ServletContext.class);
        // Record each addServlet(...) call and hand back its mock registration.
        lenient().doAnswer((invocation) -> {
            RegisteredServlet registeredServlet = new RegisteredServlet(invocation.getArgument(1));
            MockServletWebServer.this.registeredServlets.add(registeredServlet);
            this.servletRegistrations.put(invocation.getArgument(0), registeredServlet.getRegistration());
            return registeredServlet.getRegistration();
        }).when(this.servletContext).addServlet(anyString(), any(Servlet.class));
        // Record each addFilter(...) call the same way.
        lenient().doAnswer((invocation) -> {
            RegisteredFilter registeredFilter = new RegisteredFilter(invocation.getArgument(1));
            MockServletWebServer.this.registeredFilters.add(registeredFilter);
            this.filterRegistrations.put(invocation.getArgument(0), registeredFilter.getRegistration());
            return registeredFilter.getRegistration();
        }).when(this.servletContext).addFilter(anyString(), any(Filter.class));
        final SessionCookieConfig sessionCookieConfig = new MockSessionCookieConfig();
        given(this.servletContext.getSessionCookieConfig()).willReturn(sessionCookieConfig);
        // Back init parameters with a real map so set/get round-trip.
        final Map<String, String> initParameters = new HashMap<>();
        lenient().doAnswer((invocation) -> {
            initParameters.put(invocation.getArgument(0), invocation.getArgument(1));
            return null;
        }).when(this.servletContext).setInitParameter(anyString(), anyString());
        // NOTE(review): the enumeration is captured once here, so names added after this
        // point may not be visible through getInitParameterNames() — confirm if relied upon.
        given(this.servletContext.getInitParameterNames())
                .willReturn(Collections.enumeration(initParameters.keySet()));
        lenient().doAnswer((invocation) -> initParameters.get(invocation.getArgument(0)))
            .when(this.servletContext)
            .getInitParameter(anyString());
        given(this.servletContext.getAttributeNames()).willReturn(Collections.emptyEnumeration());
        lenient().when((Map<String, FilterRegistration>) this.servletContext.getFilterRegistrations())
            .thenReturn(this.filterRegistrations);
        lenient().when((Map<String, ServletRegistration>) this.servletContext.getServletRegistrations())
            .thenReturn(this.servletRegistrations);
        // Finally run every configured initializer against the prepared mock context.
        for (Initializer initializer : this.initializers) {
            initializer.onStartup(this.servletContext);
        }
    }
    catch (ServletException ex) {
        throw new RuntimeException(ex);
    }
}
@Override
public void start() throws WebServerException {
}
@Override
public void stop() {
    // Discard the mock context and forget every recorded registration.
    this.servletContext = null;
    this.registeredFilters.clear();
    this.filterRegistrations.clear();
    this.registeredServlets.clear();
}
public ServletContext getServletContext() {
return this.servletContext;
}
// Returns the registered servlet instances, in registration order.
public Servlet[] getServlets() {
    return this.registeredServlets.stream()
        .map(RegisteredServlet::getServlet)
        .toArray(Servlet[]::new);
}
public RegisteredServlet getRegisteredServlet(int index) {
return getRegisteredServlets().get(index);
}
public List<RegisteredServlet> getRegisteredServlets() {
return this.registeredServlets;
}
public RegisteredFilter getRegisteredFilters(int index) {
return getRegisteredFilters().get(index);
}
public List<RegisteredFilter> getRegisteredFilters() {
return this.registeredFilters;
}
@Override
public int getPort() {
return this.port;
}
/**
* A registered servlet.
*/
public static
|
MockServletWebServer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/QueryToManyWithNestedToOneTest.java
|
{
"start": 1340,
"end": 3528
}
|
class ____ {
@Test
// Criteria form: join parent -> values (set) -> key, filter on key.keyValue IN (...),
// and verify the matching parent owns exactly the two expected keys.
public void testCriteriaQuery(SessionFactoryScope scope) {
    scope.inTransaction( session -> {
        final CriteriaBuilder cb = session.getCriteriaBuilder();
        final CriteriaQuery<ParentEntity> cq = cb.createQuery( ParentEntity.class );
        final Root<ParentEntity> root = cq.from( ParentEntity.class );
        final SetJoin<ParentEntity, ValueEntity> valuesJoin = root.joinSet( "values" );
        final Join<ValueEntity, KeyEntity> key = valuesJoin.join( "key" );
        final ParentEntity result = session.createQuery(
                cq.where( key.get( "keyValue" ).in( "key_1", "key_2" ) )
        ).getSingleResult();
        // Fixture (see setUp): one parent owns key_1 and key_2.
        assertThat( result.getValues() ).hasSize( 2 )
                .extracting( ValueEntity::getKey )
                .extracting( KeyEntity::getKeyValue )
                .containsOnly( "key_1", "key_2" );
    } );
}
@Test
// HQL equivalent of testCriteriaQuery, filtering on the third fixture key.
public void testHQLQuery(SessionFactoryScope scope) {
    scope.inTransaction( session -> {
        final ParentEntity result = session.createQuery(
                "from ParentEntity p join p.values values join values.key key where key.keyValue in ('key_3')",
                ParentEntity.class
        ).getSingleResult();
        // Fixture (see setUp): the second parent owns only key_3.
        assertThat( result.getValues() ).hasSize( 1 )
                .extracting( ValueEntity::getKey )
                .extracting( KeyEntity::getKeyValue )
                .containsOnly( "key_3" );
    } );
}
@BeforeAll
// Fixture: one parent owning values keyed "key_1" and "key_2", plus a second
// parent owning a single value keyed "key_3".
public void setUp(SessionFactoryScope scope) {
    scope.inTransaction( session -> {
        final ParentEntity parent1 = new ParentEntity();
        final ValueEntity value1 = new ValueEntity( parent1, new KeyEntity( "key_1" ) );
        session.persist( value1 );
        final ValueEntity value2 = new ValueEntity( parent1, new KeyEntity( "key_2" ) );
        session.persist( value2 );
        final ValueEntity value3 = new ValueEntity( new ParentEntity(), new KeyEntity( "key_3" ) );
        session.persist( value3 );
    } );
}
@AfterAll
// Remove test data; ValueEntity is deleted first since it references the other two.
public void tearDown(SessionFactoryScope scope) {
    scope.inTransaction( session -> {
        session.createMutationQuery( "delete from ValueEntity" ).executeUpdate();
        session.createMutationQuery( "delete from ParentEntity" ).executeUpdate();
        session.createMutationQuery( "delete from KeyEntity" ).executeUpdate();
    } );
}
@Entity( name = "ParentEntity" )
static
|
QueryToManyWithNestedToOneTest
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/tools/Generate.java
|
{
"start": 3718,
"end": 6042
}
|
interface ____ convenience methods for%n"
+ " * %s%n"
+ " * <p>Compatible with Log4j 2.6 or higher.</p>%n"
+ " */%n"
+ "public final class %s implements Serializable {%n"
+ " private static final long serialVersionUID = " + System.nanoTime() + "L;%n"
+ " private final ExtendedLoggerWrapper logger;%n"
+ "%n";
// @formatter:on
}
@Override
String constructor() {
// @formatter:off
return ""
+ "%n"
+ " private %s(final Logger logger) {%n"
+ " this.logger = new ExtendedLoggerWrapper((AbstractLogger) logger, logger.getName(), "
+ "logger.getMessageFactory());%n"
+ " }%n";
// @formatter:on
}
@Override
Class<?> generator() {
return CustomLogger.class;
}
},
EXTEND {
@Override
String imports() {
// @formatter:off
return ""
+ "import org.apache.logging.log4j.Level;%n"
+ "import org.apache.logging.log4j.LogManager;%n"
+ "import org.apache.logging.log4j.Logger;%n"
+ "import org.apache.logging.log4j.Marker;%n"
+ "import org.apache.logging.log4j.message.Message;%n"
+ "import org.apache.logging.log4j.message.MessageFactory;%n"
+ "import org.apache.logging.log4j.spi.AbstractLogger;%n"
+ "import org.apache.logging.log4j.spi.ExtendedLoggerWrapper;%n"
+ "import org.apache.logging.log4j.util.MessageSupplier;%n"
+ "import org.apache.logging.log4j.util.Supplier;%n"
+ "%n";
// @formatter:on
}
@Override
String declaration() {
// @formatter:off
return ""
+ "/**%n"
+ " * Extended Logger
|
with
|
java
|
reactor__reactor-core
|
reactor-core/src/jcstress/java/reactor/core/publisher/FluxSwitchOnFirstStressTest.java
|
{
"start": 11811,
"end": 13705
}
|
// JCStress state: races an inbound onError against an outbound cancel while the
// switchOnFirst transform itself throws (DUMMY_ERROR_2).
class ____
        extends FluxSwitchOnFirstStressTest {

    static final RuntimeException DUMMY_ERROR_1 = new RuntimeException("dummy1");
    static final RuntimeException DUMMY_ERROR_2 = new RuntimeException("dummy2");

    @Override
    Flux<String> switchOnFirst(Signal<? extends String> signal,
            Flux<String> inbound) {
        // The transform always fails, so outbound subscription can never be produced.
        throw DUMMY_ERROR_2;
    }

    @Actor
    public void errorInbound() {
        main.onError(DUMMY_ERROR_1);
    }

    @Actor
    public void cancelOutbound() {
        outboundSubscriber.cancel();
    }

    @Arbiter
    public void arbiter(LLLL_Result result) {
        result.r1 = inboundSubscription.cancelled ? 1 : 0;
        // Encodes the termination observed downstream:
        // onComplete counts 1, onError counts 2, each dropped error counts 3.
        result.r2 =
                outboundSubscriber.onCompleteCalls.get() + outboundSubscriber.onErrorCalls.get() * 2 + outboundSubscriber.droppedErrors.size() * 3;
        result.r3 = outboundSubscriber.onNextCalls;
        result.r4 = outboundSubscriber.onNextDiscarded;

        // Any concurrent signal delivery is a serialization violation and fails the run.
        if (outboundSubscriber.concurrentOnError.get()) {
            throw new RuntimeException("Concurrent OnError");
        }
        if (outboundSubscriber.concurrentOnNext.get()) {
            throw new RuntimeException("Concurrent OnNext");
        }
        if (outboundSubscriber.concurrentOnComplete.get()) {
            throw new RuntimeException("Concurrent OnComplete");
        }
    }
}
// Ignore, flaky test (https://github.com/reactor/reactor-core/issues/3627)
//@JCStressTest
@Outcome(id = {
"0, 0, 1, 2, 2, 1, 0, 1, 0"}, expect = ACCEPTABLE, desc = "inbound request happened first. then inbound cancel. then outbound cancel")
@Outcome(id = {
"0, 0, 1, 2, 2, 1, 2, 1, 0"}, expect = ACCEPTABLE, desc = "inbound request happened first. then outbound cancel with error")
@Outcome(id = {
"0, 0, 1, 1, 1, 1, 0, 0, 1"}, expect = ACCEPTABLE, desc = "inbound cancel first")
@Outcome(id = {
"0, 0, 1, 1, 1, 1, 2, 0, 1"}, expect = ACCEPTABLE, desc = "outbound cancel with error first")
@State
public static
|
InboundCompleteLeadingToErrorAndOutboundCancelStressTest
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/util/KeyValuePair.java
|
{
"start": 1347,
"end": 2276
}
|
class ____ {
/**
* The empty array.
*/
public static final KeyValuePair[] EMPTY_ARRAY = {};
private final String key;
private final String value;
/**
* Constructs a key/value pair. The constructor should only be called from test classes.
* @param key The key.
* @param value The value.
*/
public KeyValuePair(final String key, final String value) {
this.key = key;
this.value = value;
}
/**
* Returns the key.
* @return the key.
*/
public String getKey() {
return key;
}
/**
* Returns the value.
* @return The value.
*/
public String getValue() {
return value;
}
@Override
public String toString() {
    // Same "key=value" rendering as concatenation; %s maps null to "null" identically.
    return String.format("%s=%s", key, value);
}
@PluginBuilderFactory
public static Builder newBuilder() {
return new Builder();
}
public static
|
KeyValuePair
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-23/src/main/java/org/redisson/spring/data/connection/RedissonConnection.java
|
{
"start": 2466,
"end": 84285
}
|
class ____ extends AbstractRedisConnection {
private boolean closed;
protected final Redisson redisson;
CommandAsyncExecutor executorService;
private RedissonSubscription subscription;
public RedissonConnection(RedissonClient redisson) {
super();
this.redisson = (Redisson) redisson;
executorService = this.redisson.getCommandExecutor();
}
@Override
public void close() throws DataAccessException {
    super.close();

    // If a transaction (MULTI) was opened but never executed, roll it back
    // before marking this connection closed.
    if (isQueueing()) {
        CommandBatchService es = (CommandBatchService) executorService;
        if (!es.isExecuted()) {
            discard();
        }
    }
    closed = true;
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public Object getNativeConnection() {
return redisson;
}
@Override
public boolean isQueueing() {
    // Queueing corresponds to Redis MULTI/EXEC batch mode.
    if (!(executorService instanceof CommandBatchService)) {
        return false;
    }
    CommandBatchService batchService = (CommandBatchService) executorService;
    return batchService.getOptions().getExecutionMode() == ExecutionMode.REDIS_WRITE_ATOMIC;
}
@Override
public boolean isPipelined() {
    // Pipelining covers both plain and atomic in-memory batch modes.
    if (!(executorService instanceof CommandBatchService)) {
        return false;
    }
    ExecutionMode mode = ((CommandBatchService) executorService).getOptions().getExecutionMode();
    return mode == ExecutionMode.IN_MEMORY || mode == ExecutionMode.IN_MEMORY_ATOMIC;
}
public boolean isPipelinedAtomic() {
if (executorService instanceof CommandBatchService) {
CommandBatchService es = (CommandBatchService) executorService;
return es.getOptions().getExecutionMode() == ExecutionMode.IN_MEMORY_ATOMIC;
}
return false;
}
@Override
public void openPipeline() {
BatchOptions options = BatchOptions.defaults()
.executionMode(ExecutionMode.IN_MEMORY);
this.executorService = executorService.createCommandBatchService(options);
}
@Override
public List<Object> closePipeline() throws RedisPipelineException {
    if (isPipelined()) {
        CommandBatchService es = (CommandBatchService) executorService;
        try {
            BatchResult<?> result = es.execute();
            filterResults(result);
            // An atomic pipeline wraps all responses in a single nested list element.
            if (isPipelinedAtomic()) {
                return Arrays.<Object>asList((List<Object>) result.getResponses());
            }
            return (List<Object>) result.getResponses();
        } catch (Exception ex) {
            // Spring Data contract: failures while flushing a pipeline surface as RedisPipelineException.
            throw new RedisPipelineException(ex);
        } finally {
            resetConnection();
        }
    }
    // Not in pipeline mode: nothing was queued.
    return Collections.emptyList();
}
@Override
// Generic command dispatch: reflectively finds a public method on this class whose
// name matches the command (case-insensitively) and whose arity matches args.
public Object execute(String command, byte[]... args) {
    for (Method method : this.getClass().getDeclaredMethods()) {
        if (method.getName().equalsIgnoreCase(command)
                && Modifier.isPublic(method.getModifiers())
                && (method.getParameterTypes().length == args.length)) {
            try {
                Object t = execute(method, args);
                // Normalize String results to bytes to match the byte[]-oriented contract.
                // NOTE(review): uses the platform default charset — confirm this is intended.
                if (t instanceof String) {
                    return ((String) t).getBytes();
                }
                return t;
            } catch (IllegalArgumentException e) {
                if (isPipelined()) {
                    throw new RedisPipelineException(e);
                }
                throw new InvalidDataAccessApiUsageException(e.getMessage(), e);
            }
        }
    }
    // No matching public method with the same arity was found.
    throw new UnsupportedOperationException();
}
private Object execute(Method method, byte[]... args) {
if (method.getParameterTypes().length > 0 && method.getParameterTypes()[0] == byte[][].class) {
return ReflectionUtils.invokeMethod(method, this, args);
}
if (args == null) {
return ReflectionUtils.invokeMethod(method, this);
}
return ReflectionUtils.invokeMethod(method, this, Arrays.asList(args).toArray());
}
<V> V syncFuture(RFuture<V> future) {
try {
return executorService.get(future);
} catch (Exception ex) {
throw transform(ex);
}
}
protected RuntimeException transform(Exception ex) {
DataAccessException exception = RedissonConnectionFactory.EXCEPTION_TRANSLATION.translate(ex);
if (exception != null) {
return exception;
}
return new RedisSystemException(ex.getMessage(), ex);
}
@Override
public Boolean exists(byte[] key) {
return read(key, StringCodec.INSTANCE, RedisCommands.EXISTS, key);
}
@Override
public Long del(byte[]... keys) {
return write(keys[0], LongCodec.INSTANCE, RedisCommands.DEL, Arrays.asList(keys).toArray());
}
@Override
public Long unlink(byte[]... keys) {
return write(keys[0], LongCodec.INSTANCE, RedisCommands.UNLINK, Arrays.asList(keys).toArray());
}
private static final RedisStrictCommand<DataType> TYPE = new RedisStrictCommand<DataType>("TYPE", new DataTypeConvertor());
@Override
public DataType type(byte[] key) {
return read(key, StringCodec.INSTANCE, TYPE, key);
}
private static final RedisStrictCommand<Set<byte[]>> KEYS = new RedisStrictCommand<Set<byte[]>>("KEYS", new ObjectSetReplayDecoder<byte[]>());
@Override
public Set<byte[]> keys(byte[] pattern) {
    // Inside a transaction only one node can be addressed, so issue a single read.
    if (isQueueing()) {
        return read(null, ByteArrayCodec.INSTANCE, KEYS, pattern);
    }

    // Otherwise fan KEYS out to all nodes and merge the per-node result sets.
    List<CompletableFuture<Set<byte[]>>> futures = executorService.readAllAsync(ByteArrayCodec.INSTANCE, KEYS, pattern);
    CompletableFuture<Void> ff = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    CompletableFuture<Set<byte[]>> future = ff.thenApply(r -> {
        // allOf has completed, so getNow never sees an incomplete future here.
        return futures.stream().flatMap(f -> f.getNow(new HashSet<>()).stream()).collect(Collectors.toSet());
    }).toCompletableFuture();
    return sync(new CompletableFutureWrapper<>(future));
}
@Override
// Cluster-wide SCAN: iterates the keyspace of every master entry in turn,
// presenting them to the caller as one continuous cursor.
public Cursor<byte[]> scan(ScanOptions options) {
    return new ScanCursor<byte[]>(0, options) {

        private RedisClient client;
        private Iterator<MasterSlaveEntry> entries = executorService.getConnectionManager().getEntrySet().iterator();
        private MasterSlaveEntry entry = entries.next();

        @Override
        protected ScanIteration<byte[]> doScan(long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
            }

            // entry == null means every master has been fully scanned.
            if (entry == null) {
                return null;
            }

            List<Object> args = new ArrayList<Object>();
            // 101010101010101010 is a sentinel cursor meaning "begin scanning the next
            // entry from 0" — returning a real 0 would end the caller's iteration.
            if (cursorId == 101010101010101010L) {
                cursorId = 0;
            }
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, entry, ByteArrayCodec.INSTANCE, RedisCommands.SCAN, args.toArray());
            ListScanResult<byte[]> res = syncFuture(f);
            String pos = res.getPos();
            // Pin subsequent iterations to the same client that produced this cursor.
            client = res.getRedisClient();
            if ("0".equals(pos)) {
                // Current entry exhausted: move to the next one, or finish.
                if (entries.hasNext()) {
                    pos = "101010101010101010";
                    entry = entries.next();
                    client = null;
                } else {
                    entry = null;
                }
            }

            return new ScanIteration<byte[]>(Long.parseUnsignedLong(pos), res.getValues());
        }

    }.open();
}
@Override
public byte[] randomKey() {
if (isQueueing()) {
return read(null, ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
}
RFuture<byte[]> f = executorService.readRandomAsync(ByteArrayCodec.INSTANCE, RedisCommands.RANDOM_KEY);
return sync(f);
}
@Override
public void rename(byte[] oldName, byte[] newName) {
write(oldName, StringCodec.INSTANCE, RedisCommands.RENAME, oldName, newName);
}
@Override
public Boolean renameNX(byte[] oldName, byte[] newName) {
return write(oldName, StringCodec.INSTANCE, RedisCommands.RENAMENX, oldName, newName);
}
private static final RedisStrictCommand<Boolean> EXPIRE = new RedisStrictCommand<Boolean>("EXPIRE", new BooleanReplayConvertor());
@Override
public Boolean expire(byte[] key, long seconds) {
return write(key, StringCodec.INSTANCE, EXPIRE, key, seconds);
}
@Override
public Boolean pExpire(byte[] key, long millis) {
return write(key, StringCodec.INSTANCE, RedisCommands.PEXPIRE, key, millis);
}
private static final RedisStrictCommand<Boolean> EXPIREAT = new RedisStrictCommand<Boolean>("EXPIREAT", new BooleanReplayConvertor());
@Override
public Boolean expireAt(byte[] key, long unixTime) {
return write(key, StringCodec.INSTANCE, EXPIREAT, key, unixTime);
}
@Override
public Boolean pExpireAt(byte[] key, long unixTimeInMillis) {
return write(key, StringCodec.INSTANCE, RedisCommands.PEXPIREAT, key, unixTimeInMillis);
}
@Override
public Boolean persist(byte[] key) {
return write(key, StringCodec.INSTANCE, RedisCommands.PERSIST, key);
}
@Override
public Boolean move(byte[] key, int dbIndex) {
return write(key, StringCodec.INSTANCE, RedisCommands.MOVE, key, dbIndex);
}
private static final RedisStrictCommand<Long> TTL = new RedisStrictCommand<Long>("TTL");
@Override
public Long ttl(byte[] key) {
return read(key, StringCodec.INSTANCE, TTL, key);
}
// Resolves a command future according to the connection mode: pipelined commands
// return null (results are collected by closePipeline), queued (MULTI) commands
// only wait until the command is flushed, otherwise block for the result.
protected <T> T sync(RFuture<T> f) {
    if (isPipelined()) {
        return null;
    }

    if (isQueueing()) {
        // Wait for the command to be sent; the value itself arrives on EXEC.
        ((BatchPromise)f.toCompletableFuture()).getSentPromise().join();
        return null;
    }

    return syncFuture(f);
}
@Override
public Long ttl(byte[] key, TimeUnit timeUnit) {
return read(key, StringCodec.INSTANCE, new RedisStrictCommand<Long>("TTL", new SecondsConvertor(timeUnit, TimeUnit.SECONDS)), key);
}
@Override
public Long pTtl(byte[] key) {
return read(key, StringCodec.INSTANCE, RedisCommands.PTTL, key);
}
@Override
public Long pTtl(byte[] key, TimeUnit timeUnit) {
return read(key, StringCodec.INSTANCE, new RedisStrictCommand<Long>("PTTL", new SecondsConvertor(timeUnit, TimeUnit.MILLISECONDS)), key);
}
@Override
public List<byte[]> sort(byte[] key, SortParameters sortParams) {
List<Object> params = new ArrayList<Object>();
params.add(key);
if (sortParams != null) {
if (sortParams.getByPattern() != null) {
params.add("BY");
params.add(sortParams.getByPattern());
}
if (sortParams.getLimit() != null) {
params.add("LIMIT");
if (sortParams.getLimit().getStart() != -1) {
params.add(sortParams.getLimit().getStart());
}
if (sortParams.getLimit().getCount() != -1) {
params.add(sortParams.getLimit().getCount());
}
}
if (sortParams.getGetPattern() != null) {
for (byte[] pattern : sortParams.getGetPattern()) {
params.add("GET");
params.add(pattern);
}
}
if (sortParams.getOrder() != null) {
params.add(sortParams.getOrder());
}
Boolean isAlpha = sortParams.isAlphabetic();
if (isAlpha != null && isAlpha) {
params.add("ALPHA");
}
}
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SORT_LIST, params.toArray());
}
private static final RedisCommand<Long> SORT_TO = new RedisCommand<Long>("SORT");
/**
 * Executes {@code SORT key ... STORE storeKey} and returns the number of stored elements.
 *
 * <p>Fix: {@code SORT ... STORE} mutates {@code storeKey}, so it must be dispatched via
 * {@code write(...)} (routed to a master). The previous {@code read(...)} could route the
 * command to a read-only replica, where a writing command fails.
 */
@Override
public Long sort(byte[] key, SortParameters sortParams, byte[] storeKey) {
    List<Object> params = new ArrayList<Object>();
    params.add(key);
    if (sortParams != null) {
        if (sortParams.getByPattern() != null) {
            params.add("BY");
            params.add(sortParams.getByPattern());
        }

        if (sortParams.getLimit() != null) {
            params.add("LIMIT");
            if (sortParams.getLimit().getStart() != -1) {
                params.add(sortParams.getLimit().getStart());
            }
            if (sortParams.getLimit().getCount() != -1) {
                params.add(sortParams.getLimit().getCount());
            }
        }

        if (sortParams.getGetPattern() != null) {
            for (byte[] pattern : sortParams.getGetPattern()) {
                params.add("GET");
                params.add(pattern);
            }
        }

        if (sortParams.getOrder() != null) {
            params.add(sortParams.getOrder());
        }

        Boolean isAlpha = sortParams.isAlphabetic();
        if (isAlpha != null && isAlpha) {
            params.add("ALPHA");
        }
    }

    params.add("STORE");
    params.add(storeKey);

    return write(key, ByteArrayCodec.INSTANCE, SORT_TO, params.toArray());
}
@Override
public byte[] dump(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, key);
}
@Override
public void restore(byte[] key, long ttlInMillis, byte[] serializedValue) {
write(key, StringCodec.INSTANCE, RedisCommands.RESTORE, key, ttlInMillis, serializedValue);
}
@Override
public byte[] get(byte[] key) {
return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
}
@Override
public byte[] getSet(byte[] key, byte[] value) {
return write(key, ByteArrayCodec.INSTANCE, RedisCommands.GETSET, key, value);
}
private static final RedisCommand<List<Object>> MGET = new RedisCommand<List<Object>>("MGET", new ObjectListReplayDecoder<Object>());
@Override
public List<byte[]> mGet(byte[]... keys) {
return read(keys[0], ByteArrayCodec.INSTANCE, MGET, Arrays.asList(keys).toArray());
}
private static final RedisCommand<Boolean> SET = new RedisCommand<>("SET", new BooleanNullSafeReplayConvertor());
@Override
public Boolean set(byte[] key, byte[] value) {
return write(key, StringCodec.INSTANCE, SET, key, value);
}
@Override
// Maps Spring Data's Expiration/SetOption onto Redis SET arguments:
// SET_IF_ABSENT -> NX, SET_IF_PRESENT -> XX, non-persistent expirations -> PX millis.
public Boolean set(byte[] key, byte[] value, Expiration expiration, SetOption option) {
    if (expiration == null) {
        return set(key, value);
    } else if (expiration.isPersistent()) {
        // No TTL requested.
        if (option == null || option == SetOption.UPSERT) {
            return set(key, value);
        }
        if (option == SetOption.SET_IF_ABSENT) {
            return write(key, StringCodec.INSTANCE, SET, key, value, "NX");
        }
        if (option == SetOption.SET_IF_PRESENT) {
            return write(key, StringCodec.INSTANCE, SET, key, value, "XX");
        }
    } else {
        // TTL expressed in milliseconds via PX.
        if (option == null || option == SetOption.UPSERT) {
            return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds());
        }
        if (option == SetOption.SET_IF_ABSENT) {
            return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds(), "NX");
        }
        if (option == SetOption.SET_IF_PRESENT) {
            return write(key, StringCodec.INSTANCE, SET, key, value, "PX", expiration.getExpirationTimeInMilliseconds(), "XX");
        }
    }
    // Unreachable with current SetOption values; guards against future additions.
    throw new IllegalArgumentException();
}
@Override
public Boolean setNX(byte[] key, byte[] value) {
return write(key, StringCodec.INSTANCE, RedisCommands.SETNX, key, value);
}
private static final RedisCommand<Boolean> SETEX = new RedisCommand<Boolean>("SETEX", new BooleanReplayConvertor());
@Override
public Boolean setEx(byte[] key, long seconds, byte[] value) {
return write(key, StringCodec.INSTANCE, SETEX, key, seconds, value);
}
private static final RedisCommand<Boolean> PSETEX = new RedisCommand<Boolean>("PSETEX", new BooleanReplayConvertor());
@Override
public Boolean pSetEx(byte[] key, long milliseconds, byte[] value) {
return write(key, StringCodec.INSTANCE, PSETEX, key, milliseconds, value);
}
private static final RedisCommand<Boolean> MSET = new RedisCommand<Boolean>("MSET", new BooleanReplayConvertor());
@Override
public Boolean mSet(Map<byte[], byte[]> tuple) {
List<byte[]> params = convert(tuple);
return write(tuple.keySet().iterator().next(), StringCodec.INSTANCE, MSET, params.toArray());
}
// Flattens a key/value map into the alternating [k1, v1, k2, v2, ...] argument list
// expected by MSET-style commands.
protected List<byte[]> convert(Map<byte[], byte[]> tuple) {
    List<byte[]> flattened = new ArrayList<byte[]>(tuple.size() * 2);
    tuple.forEach((mapKey, mapValue) -> {
        flattened.add(mapKey);
        flattened.add(mapValue);
    });
    return flattened;
}
@Override
public Boolean mSetNX(Map<byte[], byte[]> tuple) {
List<byte[]> params = convert(tuple);
return write(tuple.keySet().iterator().next(), StringCodec.INSTANCE, RedisCommands.MSETNX, params.toArray());
}
@Override
public Long incr(byte[] key) {
return write(key, StringCodec.INSTANCE, RedisCommands.INCR, key);
}
@Override
public Long incrBy(byte[] key, long value) {
return write(key, StringCodec.INSTANCE, RedisCommands.INCRBY, key, value);
}
@Override
public Double incrBy(byte[] key, double value) {
return write(key, StringCodec.INSTANCE, RedisCommands.INCRBYFLOAT, key, BigDecimal.valueOf(value).toPlainString());
}
@Override
public Long decr(byte[] key) {
return write(key, StringCodec.INSTANCE, RedisCommands.DECR, key);
}
private static final RedisStrictCommand<Long> DECRBY = new RedisStrictCommand<Long>("DECRBY");
@Override
public Long decrBy(byte[] key, long value) {
return write(key, StringCodec.INSTANCE, DECRBY, key, value);
}
private static final RedisStrictCommand<Long> APPEND = new RedisStrictCommand<Long>("APPEND");
@Override
public Long append(byte[] key, byte[] value) {
return write(key, StringCodec.INSTANCE, APPEND, key, value);
}
private static final RedisCommand<Object> GETRANGE = new RedisCommand<Object>("GETRANGE");
@Override
public byte[] getRange(byte[] key, long begin, long end) {
return read(key, ByteArrayCodec.INSTANCE, GETRANGE, key, begin, end);
}
private static final RedisCommand<Void> SETRANGE = new RedisCommand<Void>("SETRANGE", new VoidReplayConvertor());
@Override
public void setRange(byte[] key, byte[] value, long offset) {
write(key, ByteArrayCodec.INSTANCE, SETRANGE, key, offset, value);
}
@Override
public Boolean getBit(byte[] key, long offset) {
return read(key, StringCodec.INSTANCE, RedisCommands.GETBIT, key, offset);
}
@Override
public Boolean setBit(byte[] key, long offset, boolean value) {
return write(key, StringCodec.INSTANCE, RedisCommands.SETBIT, key, offset, value ? 1 : 0);
}
@Override
public Long bitCount(byte[] key) {
return read(key, StringCodec.INSTANCE, RedisCommands.BITCOUNT, key);
}
@Override
public Long bitCount(byte[] key, long begin, long end) {
return read(key, StringCodec.INSTANCE, RedisCommands.BITCOUNT, key, begin, end);
}
private static final RedisStrictCommand<Long> BITOP = new RedisStrictCommand<Long>("BITOP");
@Override
public Long bitOp(BitOperation op, byte[] destination, byte[]... keys) {
    // Redis BITOP NOT is defined for exactly one source key.
    if (op == BitOperation.NOT && keys.length > 1) {
        throw new UnsupportedOperationException("NOT operation doesn't support more than single source key");
    }

    // Argument layout: BITOP <op> <destination> <key...>
    List<Object> params = new ArrayList<Object>(keys.length + 2);
    params.add(op);
    params.add(destination);
    params.addAll(Arrays.asList(keys));
    return write(keys[0], StringCodec.INSTANCE, BITOP, params.toArray());
}
/** Returns the length of the string value at {@code key}. */
@Override
public Long strLen(byte[] key) {
    return read(key, StringCodec.INSTANCE, RedisCommands.STRLEN, key);
}

private static final RedisStrictCommand<Long> RPUSH = new RedisStrictCommand<Long>("RPUSH");

/** Appends {@code values} to the tail of the list at {@code key}; returns the list length. */
@Override
public Long rPush(byte[] key, byte[]... values) {
    List<Object> args = new ArrayList<Object>(values.length + 1);
    args.add(key);
    args.addAll(Arrays.asList(values));
    return write(key, StringCodec.INSTANCE, RPUSH, args.toArray());
}

private static final RedisStrictCommand<Long> LPUSH = new RedisStrictCommand<Long>("LPUSH");

/** Prepends {@code values} to the head of the list at {@code key}; returns the list length. */
@Override
public Long lPush(byte[] key, byte[]... values) {
    List<Object> args = new ArrayList<Object>(values.length + 1);
    args.add(key);
    args.addAll(Arrays.asList(values));
    return write(key, StringCodec.INSTANCE, LPUSH, args.toArray());
}

private static final RedisStrictCommand<Long> RPUSHX = new RedisStrictCommand<Long>("RPUSHX");

/** Appends {@code value} only if the list already exists; returns the list length. */
@Override
public Long rPushX(byte[] key, byte[] value) {
    return write(key, StringCodec.INSTANCE, RPUSHX, key, value);
}

private static final RedisStrictCommand<Long> LPUSHX = new RedisStrictCommand<Long>("LPUSHX");

/** Prepends {@code value} only if the list already exists; returns the list length. */
@Override
public Long lPushX(byte[] key, byte[] value) {
    return write(key, StringCodec.INSTANCE, LPUSHX, key, value);
}

private static final RedisStrictCommand<Long> LLEN = new RedisStrictCommand<Long>("LLEN");

/** Returns the length of the list at {@code key}. */
@Override
public Long lLen(byte[] key) {
    return read(key, StringCodec.INSTANCE, LLEN, key);
}

/** Returns list elements between {@code start} and {@code end} (inclusive, negative indexes allowed). */
@Override
public List<byte[]> lRange(byte[] key, long start, long end) {
    return read(key, ByteArrayCodec.INSTANCE, LRANGE, key, start, end);
}

/** Trims the list at {@code key} so that only elements in [start, end] remain. */
@Override
public void lTrim(byte[] key, long start, long end) {
    write(key, StringCodec.INSTANCE, RedisCommands.LTRIM, key, start, end);
}

/** Returns the element at {@code index} of the list at {@code key}. */
@Override
public byte[] lIndex(byte[] key, long index) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.LINDEX, key, index);
}

private static final RedisStrictCommand<Long> LINSERT = new RedisStrictCommand<Long>("LINSERT");

/** Inserts {@code value} before/after {@code pivot} in the list at {@code key}; returns the new length. */
@Override
public Long lInsert(byte[] key, Position where, byte[] pivot, byte[] value) {
    return write(key, StringCodec.INSTANCE, LINSERT, key, where, pivot, value);
}
// Command names whose replies must be dropped from the results of exec()/pipeline:
// Spring Data treats these commands as void while Redis answers with a status
// reply. The recorded positions are consumed by filterResults().
private final List<String> commandsToRemove = Arrays.asList("SET",
        "RESTORE", "LTRIM", "SETEX", "SETRANGE", "FLUSHDB", "LSET", "MSET", "HMSET", "RENAME");

// Positions (within the queued batch) of replies that must be removed.
private final List<Integer> indexToRemove = new ArrayList<Integer>();

// Index of the most recently queued command while in transaction/pipeline mode; -1 otherwise.
private int index = -1;

/**
 * Executes a write command. In transaction/pipeline mode the command is only
 * queued and its position is recorded via {@link #indexCommand(RedisCommand)};
 * bookkeeping must therefore run after {@code writeAsync} but before sync().
 */
<T> T write(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
    RFuture<T> f = executorService.writeAsync(key, codec, command, params);
    indexCommand(command);
    return sync(f);
}

/**
 * Tracks the batch position of the just-queued {@code command} and marks it
 * for removal from the results if it belongs to {@link #commandsToRemove}.
 * No-op outside transaction/pipeline mode.
 */
protected void indexCommand(RedisCommand<?> command) {
    if (isQueueing() || isPipelined()) {
        index++;
        if (commandsToRemove.contains(command.getName())) {
            indexToRemove.add(index);
        }
    }
}

/**
 * Executes a read command; mirrors {@link #write(byte[], Codec, RedisCommand, Object...)}
 * including the transaction/pipeline bookkeeping.
 */
<T> T read(byte[] key, Codec codec, RedisCommand<?> command, Object... params) {
    RFuture<T> f = executorService.readAsync(key, codec, command, params);
    indexCommand(command);
    return sync(f);
}
/** Sets the list element at {@code index} to {@code value}. */
@Override
public void lSet(byte[] key, long index, byte[] value) {
    write(key, StringCodec.INSTANCE, RedisCommands.LSET, key, index, value);
}

private static final RedisStrictCommand<Long> LREM = new RedisStrictCommand<Long>("LREM");

/** Removes up to {@code count} occurrences of {@code value} from the list; returns removed count. */
@Override
public Long lRem(byte[] key, long count, byte[] value) {
    return write(key, StringCodec.INSTANCE, LREM, key, count, value);
}

/** Removes and returns the head element of the list at {@code key}. */
@Override
public byte[] lPop(byte[] key) {
    return write(key, ByteArrayCodec.INSTANCE, RedisCommands.LPOP, key);
}

/** Removes and returns the tail element of the list at {@code key}. */
@Override
public byte[] rPop(byte[] key) {
    return write(key, ByteArrayCodec.INSTANCE, RedisCommands.RPOP, key);
}

/**
 * Blocking head-pop across {@code keys}, waiting up to {@code timeout} seconds.
 * Returns a two-element list: the key that produced a value and the value itself.
 */
@Override
public List<byte[]> bLPop(int timeout, byte[]... keys) {
    List<Object> params = new ArrayList<Object>(keys.length + 1);
    params.addAll(Arrays.asList(keys));
    params.add(timeout);
    return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.BLPOP, params.toArray());
}

/**
 * Blocking tail-pop across {@code keys}, waiting up to {@code timeout} seconds.
 * Returns a two-element list: the key that produced a value and the value itself.
 */
@Override
public List<byte[]> bRPop(int timeout, byte[]... keys) {
    List<Object> params = new ArrayList<Object>(keys.length + 1);
    params.addAll(Arrays.asList(keys));
    params.add(timeout);
    return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.BRPOP, params.toArray());
}

/** Atomically pops the tail of {@code srcKey} and pushes it onto the head of {@code dstKey}. */
@Override
public byte[] rPopLPush(byte[] srcKey, byte[] dstKey) {
    return write(srcKey, ByteArrayCodec.INSTANCE, RedisCommands.RPOPLPUSH, srcKey, dstKey);
}

/** Blocking variant of {@link #rPopLPush(byte[], byte[])} with a timeout in seconds. */
@Override
public byte[] bRPopLPush(int timeout, byte[] srcKey, byte[] dstKey) {
    return write(srcKey, ByteArrayCodec.INSTANCE, RedisCommands.BRPOPLPUSH, srcKey, dstKey, timeout);
}
private static final RedisCommand<Long> SADD = new RedisCommand<Long>("SADD");

/** Adds {@code values} to the set at {@code key}; returns the number of newly added members. */
@Override
public Long sAdd(byte[] key, byte[]... values) {
    List<Object> args = new ArrayList<Object>(values.length + 1);
    args.add(key);
    args.addAll(Arrays.asList(values));
    return write(key, StringCodec.INSTANCE, SADD, args.toArray());
}

private static final RedisStrictCommand<Long> SREM = new RedisStrictCommand<Long>("SREM");

/** Removes {@code values} from the set at {@code key}; returns the number of removed members. */
@Override
public Long sRem(byte[] key, byte[]... values) {
    List<Object> args = new ArrayList<Object>(values.length + 1);
    args.add(key);
    args.addAll(Arrays.asList(values));
    return write(key, StringCodec.INSTANCE, SREM, args.toArray());
}

/** Removes and returns one random member of the set at {@code key}. */
@Override
public byte[] sPop(byte[] key) {
    return write(key, ByteArrayCodec.INSTANCE, RedisCommands.SPOP_SINGLE, key);
}

private static final RedisCommand<List<Object>> SPOP = new RedisCommand<List<Object>>("SPOP", new ObjectListReplayDecoder<Object>());

/** Removes and returns up to {@code count} random members of the set at {@code key}. */
@Override
public List<byte[]> sPop(byte[] key, long count) {
    return write(key, ByteArrayCodec.INSTANCE, SPOP, key, count);
}

/** Moves {@code value} from the set at {@code srcKey} to the set at {@code destKey}. */
@Override
public Boolean sMove(byte[] srcKey, byte[] destKey, byte[] value) {
    return write(srcKey, StringCodec.INSTANCE, RedisCommands.SMOVE, srcKey, destKey, value);
}

private static final RedisStrictCommand<Long> SCARD = new RedisStrictCommand<Long>("SCARD");

/** Returns the cardinality of the set at {@code key}. */
@Override
public Long sCard(byte[] key) {
    return read(key, StringCodec.INSTANCE, SCARD, key);
}

/** Tests whether {@code value} is a member of the set at {@code key}. */
@Override
public Boolean sIsMember(byte[] key, byte[] value) {
    return read(key, StringCodec.INSTANCE, RedisCommands.SISMEMBER, key, value);
}

/**
 * Intersection of the given sets.
 * NOTE(review): issued through write() although SINTER is read-only - presumably
 * to route via the master and keep transaction bookkeeping consistent; confirm.
 */
@Override
public Set<byte[]> sInter(byte[]... keys) {
    return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SINTER, Arrays.asList(keys).toArray());
}

/** Stores the intersection of {@code keys} at {@code destKey}; returns the result cardinality. */
@Override
public Long sInterStore(byte[] destKey, byte[]... keys) {
    List<Object> args = new ArrayList<Object>(keys.length + 1);
    args.add(destKey);
    args.addAll(Arrays.asList(keys));
    return write(keys[0], StringCodec.INSTANCE, RedisCommands.SINTERSTORE, args.toArray());
}

/** Union of the given sets (routed through write(), see note on sInter). */
@Override
public Set<byte[]> sUnion(byte[]... keys) {
    return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SUNION, Arrays.asList(keys).toArray());
}

/** Stores the union of {@code keys} at {@code destKey}; returns the result cardinality. */
@Override
public Long sUnionStore(byte[] destKey, byte[]... keys) {
    List<Object> args = new ArrayList<Object>(keys.length + 1);
    args.add(destKey);
    args.addAll(Arrays.asList(keys));
    return write(keys[0], StringCodec.INSTANCE, RedisCommands.SUNIONSTORE, args.toArray());
}

/** Difference of the first set against the remaining ones (routed through write(), see note on sInter). */
@Override
public Set<byte[]> sDiff(byte[]... keys) {
    return write(keys[0], ByteArrayCodec.INSTANCE, RedisCommands.SDIFF, Arrays.asList(keys).toArray());
}

/** Stores the difference of {@code keys} at {@code destKey}; returns the result cardinality. */
@Override
public Long sDiffStore(byte[] destKey, byte[]... keys) {
    List<Object> args = new ArrayList<Object>(keys.length + 1);
    args.add(destKey);
    args.addAll(Arrays.asList(keys));
    return write(keys[0], StringCodec.INSTANCE, RedisCommands.SDIFFSTORE, args.toArray());
}

/** Returns all members of the set at {@code key}. */
@Override
public Set<byte[]> sMembers(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SMEMBERS, key);
}

/** Returns one random member of the set at {@code key} without removing it. */
@Override
public byte[] sRandMember(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.SRANDMEMBER_SINGLE, key);
}

private static final RedisCommand<List<Object>> SRANDMEMBER = new RedisCommand<>("SRANDMEMBER", new ObjectListReplayDecoder<>());

/** Returns up to {@code count} random members of the set at {@code key} without removing them. */
@Override
public List<byte[]> sRandMember(byte[] key, long count) {
    return read(key, ByteArrayCodec.INSTANCE, SRANDMEMBER, key, count);
}
/**
 * Incrementally iterates the set at {@code key} via SSCAN.
 * The cursor pins subsequent SSCAN calls to the {@link RedisClient} that served
 * the first page, because scan cursors are only valid on the node that issued them.
 * Not supported in pipeline/transaction mode (scan requires immediate replies).
 */
@Override
public Cursor<byte[]> sScan(byte[] key, ScanOptions options) {
    return new KeyBoundCursor<byte[]>(key, 0, options) {

        private RedisClient client;

        @Override
        protected ScanIteration<byte[]> doScan(byte[] key, long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
            }

            List<Object> args = new ArrayList<Object>();
            args.add(key);
            // Redis cursors are unsigned 64-bit values.
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, RedisCommands.SSCAN, args.toArray());
            ListScanResult<byte[]> res = syncFuture(f);
            // Remember the node so the next page is requested from the same client.
            client = res.getRedisClient();
            return new ScanIteration<byte[]>(Long.parseUnsignedLong(res.getPos()), res.getValues());
        }
    }.open();
}
/** Adds {@code value} with {@code score} to the sorted set; true if it was newly added. */
@Override
public Boolean zAdd(byte[] key, double score, byte[] value) {
    // toPlainString() avoids scientific notation, which Redis does not parse.
    return write(key, StringCodec.INSTANCE, RedisCommands.ZADD_BOOL, key, BigDecimal.valueOf(score).toPlainString(), value);
}

/** Adds all {@code tuples} (score/value pairs) to the sorted set; returns the number of new members. */
@Override
public Long zAdd(byte[] key, Set<Tuple> tuples) {
    List<Object> params = new ArrayList<Object>(tuples.size()*2+1);
    params.add(key);
    for (Tuple entry : tuples) {
        params.add(BigDecimal.valueOf(entry.getScore()).toPlainString());
        params.add(entry.getValue());
    }
    return write(key, StringCodec.INSTANCE, RedisCommands.ZADD, params.toArray());
}

/** Removes {@code values} from the sorted set; returns the number of removed members. */
@Override
public Long zRem(byte[] key, byte[]... values) {
    List<Object> params = new ArrayList<Object>(values.length+1);
    params.add(key);
    params.addAll(Arrays.asList(values));
    return write(key, StringCodec.INSTANCE, RedisCommands.ZREM_LONG, params.toArray());
}
/**
 * Increments the score of {@code value} in the sorted set at {@code key} by
 * {@code increment}; returns the new score.
 */
@Override
public Double zIncrBy(byte[] key, double increment, byte[] value) {
    // Use BigDecimal.valueOf() rather than the double constructor: the latter
    // renders the exact binary expansion (0.1 -> "0.1000000000000000055511...")
    // while valueOf() yields the shortest decimal form, matching how scores are
    // formatted everywhere else in this class (see zAdd).
    return write(key, DoubleCodec.INSTANCE, RedisCommands.ZINCRBY,
                            key, BigDecimal.valueOf(increment).toPlainString(), value);
}
/** Returns the ascending-order rank of {@code value}, or null if absent. */
@Override
public Long zRank(byte[] key, byte[] value) {
    return read(key, StringCodec.INSTANCE, RedisCommands.ZRANK, key, value);
}

/** Returns the descending-order rank of {@code value}, or null if absent. */
@Override
public Long zRevRank(byte[] key, byte[] value) {
    return read(key, StringCodec.INSTANCE, RedisCommands.ZREVRANK, key, value);
}

private static final RedisCommand<Set<Object>> ZRANGE = new RedisCommand<Set<Object>>("ZRANGE", new ObjectSetReplayDecoder<Object>());

/** Returns members with rank in [start, end], ordered by ascending score. */
@Override
public Set<byte[]> zRange(byte[] key, long start, long end) {
    return read(key, ByteArrayCodec.INSTANCE, ZRANGE, key, start, end);
}

// RESP2 and RESP3 encode WITHSCORES replies differently, hence two decoders.
private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZRANGE", new ScoredSortedSetReplayDecoder());

private static final RedisCommand<Set<Tuple>> ZRANGE_ENTRY_V2 = new RedisCommand<Set<Tuple>>("ZRANGE",
        new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));

/** Returns members with rank in [start, end] together with their scores. */
@Override
public Set<Tuple> zRangeWithScores(byte[] key, long start, long end) {
    // Select decoder based on the negotiated protocol version.
    if (executorService.getServiceManager().isResp3()) {
        return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
    }
    return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY, key, start, end, "WITHSCORES");
}
/**
 * Renders a range boundary using the Redis range-argument syntax.
 * <p>
 * An absent boundary (null boundary or null value) yields {@code defaultValue}
 * (e.g. "-inf"/"+inf" for score ranges, "-"/"+" for lex ranges). Exclusive
 * boundaries are prefixed with "(". Inclusive non-numeric (lex) boundaries are
 * prefixed with "[", while inclusive numeric boundaries carry no prefix.
 * Infinite doubles become "+inf"/"-inf"; finite doubles are printed via
 * BigDecimal.valueOf().toPlainString() to avoid scientific notation.
 */
private String value(Range.Boundary boundary, String defaultValue) {
    if (boundary == null || boundary.getValue() == null) {
        return defaultValue;
    }

    Object raw = boundary.getValue();
    StringBuilder rendered = new StringBuilder();

    if (!boundary.isIncluding()) {
        rendered.append("(");
    } else if (!(raw instanceof Double)) {
        rendered.append("[");
    }

    if (raw instanceof Double) {
        double d = (Double) raw;
        if (Double.isInfinite(d)) {
            rendered.append(d > 0 ? "+inf" : "-inf");
        } else {
            rendered.append(BigDecimal.valueOf(d).toPlainString());
        }
    } else {
        rendered.append(raw);
    }
    return rendered.toString();
}
/** Returns members with score in [min, max]. */
@Override
public Set<byte[]> zRangeByScore(byte[] key, double min, double max) {
    return zRangeByScore(key, new Range().gte(min).lte(max));
}

/** Returns members with score inside {@code range}, together with their scores. */
@Override
public Set<Tuple> zRangeByScoreWithScores(byte[] key, Range range) {
    return zRangeByScoreWithScores(key, range, null);
}

/** Returns members with score in [min, max], together with their scores. */
@Override
public Set<Tuple> zRangeByScoreWithScores(byte[] key, double min, double max) {
    return zRangeByScoreWithScores(key, new Range().gte(min).lte(max));
}

/** Paged variant of {@link #zRangeByScore(byte[], double, double)} using LIMIT offset/count. */
@Override
public Set<byte[]> zRangeByScore(byte[] key, double min, double max, long offset, long count) {
    return zRangeByScore(key, new Range().gte(min).lte(max),
            new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
}

/** Paged variant of {@link #zRangeByScoreWithScores(byte[], double, double)}. */
@Override
public Set<Tuple> zRangeByScoreWithScores(byte[] key, double min, double max, long offset, long count) {
    return zRangeByScoreWithScores(key, new Range().gte(min).lte(max),
            new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
}

// RESP2 and RESP3 encode WITHSCORES replies differently, hence two decoders.
private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE", new ScoredSortedSetReplayDecoder());

private static final RedisCommand<Set<Tuple>> ZRANGEBYSCORE_V2 = new RedisCommand<Set<Tuple>>("ZRANGEBYSCORE",
        new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));

/**
 * Returns members (with scores) whose score lies inside {@code range},
 * optionally restricted by {@code limit}. Unbounded ends default to -inf/+inf.
 */
@Override
public Set<Tuple> zRangeByScoreWithScores(byte[] key, Range range, Limit limit) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");

    List<Object> args = new ArrayList<Object>();
    args.add(key);
    args.add(min);
    args.add(max);
    args.add("WITHSCORES");

    if (limit != null) {
        args.add("LIMIT");
        args.add(limit.getOffset());
        args.add(limit.getCount());
    }

    if (executorService.getServiceManager().isResp3()) {
        return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE_V2, args.toArray());
    }
    return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYSCORE, args.toArray());
}
private static final RedisCommand<Set<Object>> ZREVRANGE = new RedisCommand<Set<Object>>("ZREVRANGE", new ObjectSetReplayDecoder<Object>());

/** Returns members with rank in [start, end], ordered by descending score. */
@Override
public Set<byte[]> zRevRange(byte[] key, long start, long end) {
    return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE, key, start, end);
}

private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY = new RedisCommand<Set<Tuple>>("ZREVRANGE", new ScoredSortedSetReplayDecoder());

// NOTE(review): declared as a raw type, unlike its siblings which are
// RedisCommand<Set<Tuple>> - consider adding the type argument.
private static final RedisCommand<Set<Tuple>> ZREVRANGE_ENTRY_V2 = new RedisCommand("ZREVRANGE",
        new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));

/** Descending-order variant of {@link #zRangeWithScores(byte[], long, long)}. */
@Override
public Set<Tuple> zRevRangeWithScores(byte[] key, long start, long end) {
    if (executorService.getServiceManager().isResp3()) {
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
    }
    return read(key, ByteArrayCodec.INSTANCE, ZREVRANGE_ENTRY, key, start, end, "WITHSCORES");
}

/** Returns members with score in [min, max], ordered by descending score. */
@Override
public Set<byte[]> zRevRangeByScore(byte[] key, double min, double max) {
    return zRevRangeByScore(key, new Range().gte(min).lte(max));
}

private static final RedisCommand<Set<byte[]>> ZREVRANGEBYSCORE = new RedisCommand<Set<byte[]>>("ZREVRANGEBYSCORE", new ObjectSetReplayDecoder<byte[]>());

// RESP2 and RESP3 encode WITHSCORES replies differently, hence two decoders.
private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCOREWITHSCORES = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE", new ScoredSortedSetReplayDecoder());

private static final RedisCommand<Set<Tuple>> ZREVRANGEBYSCOREWITHSCORES_V2 = new RedisCommand<Set<Tuple>>("ZREVRANGEBYSCORE",
        new ListMultiDecoder2(new ObjectSetReplayDecoder(), new ScoredSortedSetReplayDecoderV2()));

/** Returns members with score inside {@code range}, ordered by descending score. */
@Override
public Set<byte[]> zRevRangeByScore(byte[] key, Range range) {
    return zRevRangeByScore(key, range, null);
}

/** Returns members (with scores) with score in [min, max], descending order. */
@Override
public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, double min, double max) {
    return zRevRangeByScoreWithScores(key, new Range().gte(min).lte(max));
}

/** Paged variant of {@link #zRevRangeByScore(byte[], double, double)}. */
@Override
public Set<byte[]> zRevRangeByScore(byte[] key, double min, double max, long offset, long count) {
    return zRevRangeByScore(key, new Range().gte(min).lte(max),
            new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
}

/**
 * Returns members with score inside {@code range}, descending order, with an
 * optional LIMIT. ZREVRANGEBYSCORE takes max before min, hence the argument order.
 */
@Override
public Set<byte[]> zRevRangeByScore(byte[] key, Range range, Limit limit) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");

    List<Object> args = new ArrayList<Object>();
    args.add(key);
    args.add(max);
    args.add(min);

    if (limit != null) {
        args.add("LIMIT");
        args.add(limit.getOffset());
        args.add(limit.getCount());
    }

    return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCORE, args.toArray());
}

/** Paged variant of {@link #zRevRangeByScoreWithScores(byte[], double, double)}. */
@Override
public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, double min, double max, long offset, long count) {
    return zRevRangeByScoreWithScores(key, new Range().gte(min).lte(max),
            new Limit().offset(Long.valueOf(offset).intValue()).count(Long.valueOf(count).intValue()));
}

/** Returns members (with scores) with score inside {@code range}, descending order. */
@Override
public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, Range range) {
    return zRevRangeByScoreWithScores(key, range, null);
}

/**
 * Returns members (with scores) with score inside {@code range}, descending
 * order, with an optional LIMIT. Max precedes min in the command arguments.
 */
@Override
public Set<Tuple> zRevRangeByScoreWithScores(byte[] key, Range range, Limit limit) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");

    List<Object> args = new ArrayList<Object>();
    args.add(key);
    args.add(max);
    args.add(min);
    args.add("WITHSCORES");

    if (limit != null) {
        args.add("LIMIT");
        args.add(limit.getOffset());
        args.add(limit.getCount());
    }

    if (executorService.getServiceManager().isResp3()) {
        return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCOREWITHSCORES_V2, args.toArray());
    }
    return read(key, ByteArrayCodec.INSTANCE, ZREVRANGEBYSCOREWITHSCORES, args.toArray());
}
/** Counts members with score in [min, max]. */
@Override
public Long zCount(byte[] key, double min, double max) {
    return zCount(key, new Range().gte(min).lte(max));
}

private static final RedisStrictCommand<Long> ZCOUNT = new RedisStrictCommand<Long>("ZCOUNT");

/** Counts members with score inside {@code range}; unbounded ends default to -inf/+inf. */
@Override
public Long zCount(byte[] key, Range range) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");
    return read(key, StringCodec.INSTANCE, ZCOUNT, key, min, max);
}

/** Returns the cardinality of the sorted set at {@code key}. */
@Override
public Long zCard(byte[] key) {
    return read(key, StringCodec.INSTANCE, RedisCommands.ZCARD, key);
}

/** Returns the score of {@code value}, or null if absent. */
@Override
public Double zScore(byte[] key, byte[] value) {
    return read(key, StringCodec.INSTANCE, RedisCommands.ZSCORE, key, value);
}

private static final RedisStrictCommand<Long> ZREMRANGEBYRANK = new RedisStrictCommand<Long>("ZREMRANGEBYRANK");

private static final RedisStrictCommand<Long> ZREMRANGEBYSCORE = new RedisStrictCommand<Long>("ZREMRANGEBYSCORE");

/** Removes members with rank in [start, end]; returns the number removed. */
@Override
public Long zRemRange(byte[] key, long start, long end) {
    return write(key, StringCodec.INSTANCE, ZREMRANGEBYRANK, key, start, end);
}

/** Removes members with score in [min, max]; returns the number removed. */
@Override
public Long zRemRangeByScore(byte[] key, double min, double max) {
    return zRemRangeByScore(key, new Range().gte(min).lte(max));
}

/** Removes members with score inside {@code range}; returns the number removed. */
@Override
public Long zRemRangeByScore(byte[] key, Range range) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");
    return write(key, StringCodec.INSTANCE, ZREMRANGEBYSCORE, key, min, max);
}
/** Stores the union of {@code sets} at {@code destKey} with default weights/aggregation. */
@Override
public Long zUnionStore(byte[] destKey, byte[]... sets) {
    return zUnionStore(destKey, null, (Weights)null, sets);
}

private static final RedisStrictCommand<Long> ZUNIONSTORE = new RedisStrictCommand<Long>("ZUNIONSTORE");

/**
 * Stores the union of {@code sets} at {@code destKey}.
 * Weights are rendered via toPlainString() to avoid scientific notation;
 * null weights/aggregate fall back to the Redis defaults.
 *
 * @return cardinality of the resulting sorted set
 */
@Override
public Long zUnionStore(byte[] destKey, Aggregate aggregate, Weights weights, byte[]... sets) {
    List<Object> args = new ArrayList<Object>(sets.length*2 + 5);
    args.add(destKey);
    args.add(sets.length);
    args.addAll(Arrays.asList(sets));
    if (weights != null) {
        args.add("WEIGHTS");
        for (double weight : weights.toArray()) {
            args.add(BigDecimal.valueOf(weight).toPlainString());
        }
    }
    if (aggregate != null) {
        args.add("AGGREGATE");
        args.add(aggregate.name());
    }
    return write(destKey, LongCodec.INSTANCE, ZUNIONSTORE, args.toArray());
}

private static final RedisStrictCommand<Long> ZINTERSTORE = new RedisStrictCommand<Long>("ZINTERSTORE");

/** Stores the intersection of {@code sets} at {@code destKey} with default weights/aggregation. */
@Override
public Long zInterStore(byte[] destKey, byte[]... sets) {
    return zInterStore(destKey, null, (Weights)null, sets);
}

/**
 * Stores the intersection of {@code sets} at {@code destKey}; mirrors
 * {@link #zUnionStore(byte[], Aggregate, Weights, byte[][])}.
 * NOTE(review): uses StringCodec here while zUnionStore uses LongCodec;
 * both decode a Long via the strict command, but the inconsistency is worth confirming.
 */
@Override
public Long zInterStore(byte[] destKey, Aggregate aggregate, Weights weights, byte[]... sets) {
    List<Object> args = new ArrayList<Object>(sets.length*2 + 5);
    args.add(destKey);
    args.add(sets.length);
    args.addAll(Arrays.asList(sets));
    if (weights != null) {
        args.add("WEIGHTS");
        for (double weight : weights.toArray()) {
            args.add(BigDecimal.valueOf(weight).toPlainString());
        }
    }
    if (aggregate != null) {
        args.add("AGGREGATE");
        args.add(aggregate.name());
    }
    return write(destKey, StringCodec.INSTANCE, ZINTERSTORE, args.toArray());
}
private static final RedisCommand<ListScanResult<Object>> ZSCAN = new RedisCommand<>("ZSCAN", new ListMultiDecoder2(new ListScanResultReplayDecoder(), new ScoredSortedListReplayDecoder()));

/**
 * Incrementally iterates the sorted set at {@code key} via ZSCAN, yielding
 * score/value tuples. Subsequent pages are pinned to the node that served the
 * first page (scan cursors are node-local). Not supported in pipeline/transaction mode.
 */
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
    return new KeyBoundCursor<Tuple>(key, 0, options) {

        private RedisClient client;

        @Override
        protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
            }

            List<Object> args = new ArrayList<Object>();
            args.add(key);
            // Redis cursors are unsigned 64-bit values.
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
            ListScanResult<Tuple> res = syncFuture(f);
            // Remember the node so the next page is requested from the same client.
            client = res.getRedisClient();
            return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
        }
    }.open();
}
/** Returns members with score inside the raw Redis range strings {@code min}/{@code max}. */
@Override
public Set<byte[]> zRangeByScore(byte[] key, String min, String max) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max);
}

/** Returns members with score inside {@code range}; unbounded ends default to -inf/+inf. */
@Override
public Set<byte[]> zRangeByScore(byte[] key, Range range) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max);
}

/** Raw-string score range with LIMIT offset/count. */
@Override
public Set<byte[]> zRangeByScore(byte[] key, String min, String max, long offset, long count) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, key, min, max, "LIMIT", offset, count);
}

/** Returns members with score inside {@code range}, optionally restricted by {@code limit}. */
@Override
public Set<byte[]> zRangeByScore(byte[] key, Range range, Limit limit) {
    String min = value(range.getMin(), "-inf");
    String max = value(range.getMax(), "+inf");

    List<Object> args = new ArrayList<Object>();
    args.add(key);
    args.add(min);
    args.add(max);

    if (limit != null) {
        args.add("LIMIT");
        args.add(limit.getOffset());
        args.add(limit.getCount());
    }

    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.ZRANGEBYSCORE, args.toArray());
}

/** Returns all members in lexicographical order (unbounded lex range). */
@Override
public Set<byte[]> zRangeByLex(byte[] key) {
    return zRangeByLex(key, Range.unbounded());
}

private static final RedisCommand<Set<Object>> ZRANGEBYLEX = new RedisCommand<Set<Object>>("ZRANGEBYLEX", new ObjectSetReplayDecoder<Object>());

/**
 * Returns members inside the lexicographical {@code range}.
 * Unbounded ends use the Redis lex sentinels "-" and "+"; inclusive bounds are
 * prefixed with "[" and exclusive bounds with "(" by {@code value()}.
 */
@Override
public Set<byte[]> zRangeByLex(byte[] key, Range range) {
    List<Object> params = new ArrayList<Object>();
    params.add(key);
    if (range.getMin() != null) {
        String min = value(range.getMin(), "-");
        params.add(min);
    } else {
        params.add("-");
    }
    if (range.getMax() != null) {
        String max = value(range.getMax(), "+");
        params.add(max);
    } else {
        params.add("+");
    }
    return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYLEX, params.toArray());
}

/** Lexicographical range query with an optional LIMIT. */
@Override
public Set<byte[]> zRangeByLex(byte[] key, Range range, Limit limit) {
    String min = value(range.getMin(), "-");
    String max = value(range.getMax(), "+");

    List<Object> args = new ArrayList<Object>();
    args.add(key);
    args.add(min);
    args.add(max);

    if (limit != null) {
        args.add("LIMIT");
        args.add(limit.getOffset());
        args.add(limit.getCount());
    }

    return read(key, ByteArrayCodec.INSTANCE, ZRANGEBYLEX, args.toArray());
}
/** Sets {@code field} of the hash at {@code key}; true if the field was newly created. */
@Override
public Boolean hSet(byte[] key, byte[] field, byte[] value) {
    return write(key, StringCodec.INSTANCE, RedisCommands.HSET, key, field, value);
}

/** Sets {@code field} only if it does not yet exist; true if the value was set. */
@Override
public Boolean hSetNX(byte[] key, byte[] field, byte[] value) {
    return write(key, StringCodec.INSTANCE, RedisCommands.HSETNX, key, field, value);
}

/** Returns the value of {@code field}, or null if absent. */
@Override
public byte[] hGet(byte[] key, byte[] field) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HGET, key, field);
}

private static final RedisCommand<List<Object>> HMGET = new RedisCommand<List<Object>>("HMGET", new ObjectListReplayDecoder<Object>());

/** Returns the values of {@code fields}, positionally aligned; missing fields yield null entries. */
@Override
public List<byte[]> hMGet(byte[] key, byte[]... fields) {
    List<Object> args = new ArrayList<Object>(fields.length + 1);
    args.add(key);
    args.addAll(Arrays.asList(fields));
    return read(key, ByteArrayCodec.INSTANCE, HMGET, args.toArray());
}

/** Sets all entries of {@code hashes} on the hash at {@code key}. */
@Override
public void hMSet(byte[] key, Map<byte[], byte[]> hashes) {
    List<Object> params = new ArrayList<Object>(hashes.size()*2 + 1);
    params.add(key);
    for (Map.Entry<byte[], byte[]> entry : hashes.entrySet()) {
        params.add(entry.getKey());
        params.add(entry.getValue());
    }
    write(key, StringCodec.INSTANCE, RedisCommands.HMSET, params.toArray());
}

private static final RedisCommand<Long> HINCRBY = new RedisCommand<Long>("HINCRBY");

/** Increments the integer value of {@code field} by {@code delta}; returns the new value. */
@Override
public Long hIncrBy(byte[] key, byte[] field, long delta) {
    return write(key, StringCodec.INSTANCE, HINCRBY, key, field, delta);
}

private static final RedisCommand<Double> HINCRBYFLOAT = new RedisCommand<Double>("HINCRBYFLOAT", new DoubleReplayConvertor());

/** Increments the float value of {@code field} by {@code delta}; returns the new value. */
@Override
public Double hIncrBy(byte[] key, byte[] field, double delta) {
    // toPlainString() avoids scientific notation, which Redis does not parse.
    return write(key, StringCodec.INSTANCE, HINCRBYFLOAT, key, field, BigDecimal.valueOf(delta).toPlainString());
}

/** Tests whether {@code field} exists on the hash at {@code key}. */
@Override
public Boolean hExists(byte[] key, byte[] field) {
    return read(key, StringCodec.INSTANCE, RedisCommands.HEXISTS, key, field);
}

/** Removes {@code fields} from the hash; returns the number of removed fields. */
@Override
public Long hDel(byte[] key, byte[]... fields) {
    List<Object> args = new ArrayList<Object>(fields.length + 1);
    args.add(key);
    args.addAll(Arrays.asList(fields));
    return write(key, StringCodec.INSTANCE, RedisCommands.HDEL, args.toArray());
}

private static final RedisStrictCommand<Long> HLEN = new RedisStrictCommand<Long>("HLEN");

/** Returns the number of fields of the hash at {@code key}. */
@Override
public Long hLen(byte[] key) {
    return read(key, StringCodec.INSTANCE, HLEN, key);
}

/** Returns all field names of the hash at {@code key}. */
@Override
public Set<byte[]> hKeys(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HKEYS, key);
}

/** Returns all values of the hash at {@code key}. */
@Override
public List<byte[]> hVals(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HVALS, key);
}

/** Returns all field/value pairs of the hash at {@code key}. */
@Override
public Map<byte[], byte[]> hGetAll(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.HGETALL, key);
}
/**
 * Incrementally iterates the hash at {@code key} via HSCAN, yielding
 * field/value entries. Subsequent pages are pinned to the node that served the
 * first page (scan cursors are node-local). Not supported in pipeline/transaction mode.
 */
@Override
public Cursor<Entry<byte[], byte[]>> hScan(byte[] key, ScanOptions options) {
    return new KeyBoundCursor<Entry<byte[], byte[]>>(key, 0, options) {

        private RedisClient client;

        @Override
        protected ScanIteration<Entry<byte[], byte[]>> doScan(byte[] key, long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'HSCAN' cannot be called in pipeline / transaction mode.");
            }

            List<Object> args = new ArrayList<Object>();
            args.add(key);
            // Redis cursors are unsigned 64-bit values.
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<MapScanResult<byte[], byte[]>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, RedisCommands.HSCAN, args.toArray());
            MapScanResult<byte[], byte[]> res = syncFuture(f);
            // Remember the node so the next page is requested from the same client.
            client = res.getRedisClient();
            return new ScanIteration<Entry<byte[], byte[]>>(Long.parseUnsignedLong(res.getPos()), res.getValues());
        }
    }.open();
}
/**
 * Starts a MULTI transaction by swapping the executor for a batch service.
 * Idempotent while already queueing. If a pipeline is open, the batch is
 * upgraded to an in-memory atomic batch instead of a server-side transaction.
 */
@Override
public void multi() {
    if (isQueueing()) {
        return;
    }

    if (isPipelined()) {
        BatchOptions options = BatchOptions.defaults()
                .executionMode(ExecutionMode.IN_MEMORY_ATOMIC);
        this.executorService = executorService.createCommandBatchService(options);
        return;
    }

    BatchOptions options = BatchOptions.defaults()
            .executionMode(ExecutionMode.REDIS_WRITE_ATOMIC);
    this.executorService = executorService.createCommandBatchService(options);
}

/**
 * Executes the queued transaction and returns its filtered replies.
 * Returns null when the transaction was opened inside a pipeline (replies are
 * delivered when the pipeline is closed). Always restores the plain executor,
 * even when execution fails.
 *
 * @throws InvalidDataAccessApiUsageException if no transaction is active
 */
@Override
public List<Object> exec() {
    if (isPipelinedAtomic()) {
        return null;
    }
    if (isQueueing()) {
        try {
            BatchResult<?> result = ((CommandBatchService)executorService).execute();
            filterResults(result);
            return (List<Object>) result.getResponses();
        } catch (Exception ex) {
            throw transform(ex);
        } finally {
            resetConnection();
        }
    } else {
        throw new InvalidDataAccessApiUsageException("Not in transaction mode. Please invoke multi method");
    }
}
/**
 * Post-processes a batch/transaction result in place before it is returned.
 * <p>
 * Two fix-ups are applied:
 * <ol>
 *   <li>Replies at positions recorded in {@code indexToRemove} (commands Spring
 *       Data treats as void, see {@code commandsToRemove}) are dropped. Each
 *       removal shifts the remaining elements left, hence the running
 *       {@code removed} offset. {@code indexToRemove} is populated in ascending
 *       order by {@code indexCommand()}, which this correction relies on.</li>
 *   <li>Plain {@link String} status replies (e.g. "OK") are converted to byte
 *       arrays, as the Spring Data contract expects.</li>
 * </ol>
 *
 * @param result batch result to filter; mutated in place
 */
protected void filterResults(BatchResult<?> result) {
    if (result.getResponses().isEmpty()) {
        return;
    }

    int removed = 0;
    for (Integer index : indexToRemove) {
        // remove(int) overload: position shifts left by the number of prior removals.
        result.getResponses().remove(index - removed);
        removed++;
    }

    for (ListIterator<Object> iterator = (ListIterator<Object>) result.getResponses().listIterator(); iterator.hasNext();) {
        Object object = iterator.next();
        if (object instanceof String) {
            // Specify the charset explicitly: the no-arg getBytes() uses the JVM's
            // platform-default charset and could mangle non-ASCII replies on
            // non-UTF-8 platforms.
            iterator.set(((String) object).getBytes(java.nio.charset.StandardCharsets.UTF_8));
        }
    }
}
/** Restores the plain (non-batch) executor and clears transaction bookkeeping state. */
protected void resetConnection() {
    executorService = this.redisson.getCommandExecutor();
    index = -1;
    indexToRemove.clear();
}

/**
 * Discards the queued transaction (DISCARD) and resets the connection state.
 *
 * @throws InvalidDataAccessApiUsageException if no transaction is active
 */
@Override
public void discard() {
    if (isQueueing()) {
        syncFuture(executorService.writeAsync(null, RedisCommands.DISCARD));
        resetConnection();
    } else {
        throw new InvalidDataAccessApiUsageException("Not in transaction mode. Please invoke multi method");
    }
}

/**
 * Issues WATCH for the given keys; must be called before multi(),
 * hence the rejection while queueing.
 */
@Override
public void watch(byte[]... keys) {
    if (isQueueing()) {
        throw new UnsupportedOperationException();
    }

    syncFuture(executorService.writeAsync(null, RedisCommands.WATCH, keys));
}

/** Flushes all watched keys (UNWATCH). */
@Override
public void unwatch() {
    syncFuture(executorService.writeAsync(null, RedisCommands.UNWATCH));
}
/** Whether this connection currently holds a live pub/sub subscription. */
@Override
public boolean isSubscribed() {
    return subscription != null && subscription.isAlive();
}

/** Returns the current subscription, or null if none is active. */
@Override
public Subscription getSubscription() {
    return subscription;
}

/** Publishes {@code message} to {@code channel}; returns the number of receiving clients. */
@Override
public Long publish(byte[] channel, byte[] message) {
    return write(channel, StringCodec.INSTANCE, RedisCommands.PUBLISH, channel, message);
}

/**
 * Subscribes {@code listener} to the given channels. Only a single subscription
 * per connection is allowed; see {@link #checkSubscription()}.
 */
@Override
public void subscribe(MessageListener listener, byte[]... channels) {
    checkSubscription();

    subscription = new RedissonSubscription(redisson.getCommandExecutor(), listener);
    subscription.subscribe(channels);
}

/**
 * Rejects a new subscription if one is already active or the connection is in
 * transaction or pipeline mode (pub/sub requires a dedicated connection state).
 */
private void checkSubscription() {
    if (subscription != null) {
        throw new RedisSubscribedConnectionException("Connection already subscribed");
    }

    if (isQueueing()) {
        throw new UnsupportedOperationException("Not supported in queueing mode");
    }
    if (isPipelined()) {
        throw new UnsupportedOperationException("Not supported in pipelined mode");
    }
}

/** Subscribes {@code listener} to channels matching the given patterns (PSUBSCRIBE). */
@Override
public void pSubscribe(MessageListener listener, byte[]... patterns) {
    checkSubscription();

    subscription = new RedissonSubscription(redisson.getCommandExecutor(), listener);
    subscription.pSubscribe(patterns);
}

/** Database selection is not supported; the database is fixed by the Redisson configuration. */
@Override
public void select(int dbIndex) {
    throw new UnsupportedOperationException();
}
private static final RedisCommand<Object> ECHO = new RedisCommand<Object>("ECHO");

/** Echoes {@code message} back from the server. */
@Override
public byte[] echo(byte[] message) {
    return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
}

/** Pings the server; returns "PONG" on success. */
@Override
public String ping() {
    return read(null, StringCodec.INSTANCE, RedisCommands.PING);
}

/** Not supported by this connection implementation. */
@Override
public void bgWriteAof() {
    throw new UnsupportedOperationException();
}

/** Starts an asynchronous AOF rewrite (BGREWRITEAOF). */
@Override
public void bgReWriteAof() {
    write(null, StringCodec.INSTANCE, RedisCommands.BGREWRITEAOF);
}

/** Starts an asynchronous snapshot save (BGSAVE). */
@Override
public void bgSave() {
    write(null, StringCodec.INSTANCE, RedisCommands.BGSAVE);
}

/** Returns the Unix timestamp of the last successful save (LASTSAVE). */
@Override
public Long lastSave() {
    return write(null, StringCodec.INSTANCE, RedisCommands.LASTSAVE);
}

private static final RedisStrictCommand<Void> SAVE = new RedisStrictCommand<Void>("SAVE", new VoidReplayConvertor());

/** Performs a synchronous (blocking) snapshot save. */
@Override
public void save() {
    write(null, StringCodec.INSTANCE, SAVE);
}

/**
 * Returns the number of keys. In transaction mode a single DBSIZE is queued;
 * otherwise DBSIZE is fanned out to all nodes and the counts are summed
 * (missing replies count as 0).
 */
@Override
public Long dbSize() {
    if (isQueueing()) {
        return read(null, StringCodec.INSTANCE, RedisCommands.DBSIZE);
    }

    List<CompletableFuture<Long>> futures = executorService.readAllAsync(RedisCommands.DBSIZE);
    CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    CompletableFuture<Long> s = f.thenApply(r -> futures.stream().mapToLong(v -> v.getNow(0L)).sum());
    CompletableFutureWrapper<Long> ff = new CompletableFutureWrapper<>(s);
    return sync(ff);
}

/**
 * Flushes the current database. In transaction/pipeline mode the command is
 * queued once; otherwise it is broadcast to all master nodes.
 */
@Override
public void flushDb() {
    if (isQueueing() || isPipelined()) {
        write(null, StringCodec.INSTANCE, RedisCommands.FLUSHDB);
        return;
    }

    RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.FLUSHDB);
    sync(f);
}

/** Flushes all databases on all master nodes (FLUSHALL). */
@Override
public void flushAll() {
    RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.FLUSHALL);
    sync(f);
}
    private static final RedisStrictCommand<Properties> INFO_DEFAULT = new RedisStrictCommand<Properties>("INFO", "DEFAULT", new ObjectDecoder(new PropertiesDecoder()));
    private static final RedisStrictCommand<Properties> INFO = new RedisStrictCommand<Properties>("INFO", new ObjectDecoder(new PropertiesDecoder()));

    @Override
    public Properties info() {
        // INFO DEFAULT — server statistics decoded into a Properties object.
        return read(null, StringCodec.INSTANCE, INFO_DEFAULT);
    }

    @Override
    public Properties info(String section) {
        // INFO <section> — a single section of the server statistics.
        return read(null, StringCodec.INSTANCE, INFO, section);
    }

    @Override
    public void shutdown() {
        // Shutting the server down is not supported through this connection.
        throw new UnsupportedOperationException();
    }

    @Override
    public void shutdown(ShutdownOption option) {
        throw new UnsupportedOperationException();
    }
    private static final RedisStrictCommand<Properties> CONFIG_GET = new RedisStrictCommand<Properties>("CONFIG", "GET", new PropertiesListDecoder());

    @Override
    public Properties getConfig(String pattern) {
        // CONFIG GET <pattern> — matching configuration parameters as Properties.
        return read(null, StringCodec.INSTANCE, CONFIG_GET, pattern);
    }

    @Override
    public void setConfig(String param, String value) {
        // CONFIG SET <param> <value>.
        write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
    }

    @Override
    public void resetConfigStats() {
        // CONFIG RESETSTAT — reset the statistics reported by INFO.
        write(null, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
    }

    private static final RedisStrictCommand<Long> TIME = new RedisStrictCommand<Long>("TIME", new TimeLongObjectDecoder());

    @Override
    public Long time() {
        // TIME — server time, decoded into a single Long by TimeLongObjectDecoder.
        return read(null, LongCodec.INSTANCE, TIME);
    }
    @Override
    public void killClient(String host, int port) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void setClientName(byte[] name) {
        // The client name is fixed at connection time via the Redisson Config.
        throw new UnsupportedOperationException("Should be defined through Redisson Config object");
    }

    @Override
    public String getClientName() {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<RedisClientInfo> getClientList() {
        // CLIENT LIST — connected clients of the node this command is routed to.
        return read(null, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    }

    @Override
    public void slaveOf(String host, int port) {
        // Replication topology changes are not supported through this connection.
        throw new UnsupportedOperationException();
    }

    @Override
    public void slaveOfNoOne() {
        throw new UnsupportedOperationException();
    }
    @Override
    public void migrate(byte[] key, RedisNode target, int dbIndex, MigrateOption option) {
        // Delegates with an effectively unlimited timeout.
        migrate(key, target, dbIndex, option, Long.MAX_VALUE);
    }

    @Override
    public void migrate(byte[] key, RedisNode target, int dbIndex, MigrateOption option, long timeout) {
        // MIGRATE host port key destination-db timeout.
        // NOTE(review): the 'option' argument (COPY/REPLACE) is never forwarded to
        // the command — confirm whether that is intentional.
        write(key, StringCodec.INSTANCE, RedisCommands.MIGRATE, target.getHost(), target.getPort(), key, dbIndex, timeout);
    }
    @Override
    public void scriptFlush() {
        // SCRIPT FLUSH cannot be queued in a transaction or pipeline; it is
        // broadcast to all master nodes.
        if (isQueueing() || isPipelined()) {
            throw new UnsupportedOperationException();
        }

        RFuture<Void> f = executorService.writeAllVoidAsync(RedisCommands.SCRIPT_FLUSH);
        sync(f);
    }

    @Override
    public void scriptKill() {
        throw new UnsupportedOperationException();
    }
@Override
public String scriptLoad(byte[] script) {
if (isQueueing()) {
throw new UnsupportedOperationException();
}
if (isPipelined()) {
throw new UnsupportedOperationException();
}
List<CompletableFuture<String>> futures = executorService.executeAllAsync(RedisCommands.SCRIPT_LOAD, (Object)script);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<String> s = f.thenApply(r -> futures.get(0).getNow(null));
return sync(new CompletableFutureWrapper<>(s));
}
@Override
public List<Boolean> scriptExists(final String... scriptShas) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException();
}
List<CompletableFuture<List<Boolean>>> futures = executorService.writeAllAsync(RedisCommands.SCRIPT_EXISTS, (Object[]) scriptShas);
CompletableFuture<Void> f = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
CompletableFuture<List<Boolean>> s = f.thenApply(r -> {
List<Boolean> result = futures.get(0).getNow(new ArrayList<>());
for (CompletableFuture<List<Boolean>> future : futures.subList(1, futures.size())) {
List<Boolean> l = future.getNow(new ArrayList<>());
for (int i = 0; i < l.size(); i++) {
result.set(i, result.get(i) | l.get(i));
}
}
return result;
});
return sync(new CompletableFutureWrapper<>(s));
}
    @Override
    public <T> T eval(byte[] script, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
        // EVAL is not supported inside MULTI/pipeline blocks.
        if (isQueueing()) {
            throw new UnsupportedOperationException();
        }
        if (isPipelined()) {
            throw new UnsupportedOperationException();
        }

        // EVAL script numKeys key [key ...] arg [arg ...]
        RedisCommand<?> c = toCommand(returnType, "EVAL");
        List<Object> params = new ArrayList<Object>();
        params.add(script);
        params.add(numKeys);
        params.addAll(Arrays.asList(keysAndArgs));

        // Route by the first declared key, if any.
        byte[] key = getKey(numKeys, keysAndArgs);
        return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
    }
protected RedisCommand<?> toCommand(ReturnType returnType, String name) {
RedisCommand<?> c = null;
if (returnType == ReturnType.BOOLEAN) {
c = org.redisson.api.RScript.ReturnType.BOOLEAN.getCommand();
} else if (returnType == ReturnType.INTEGER) {
c = org.redisson.api.RScript.ReturnType.INTEGER.getCommand();
} else if (returnType == ReturnType.MULTI) {
c = org.redisson.api.RScript.ReturnType.MULTI.getCommand();
return new RedisCommand(c, name, new BinaryConvertor());
} else if (returnType == ReturnType.STATUS) {
c = org.redisson.api.RScript.ReturnType.STATUS.getCommand();
} else if (returnType == ReturnType.VALUE) {
c = org.redisson.api.RScript.ReturnType.VALUE.getCommand();
return new RedisCommand(c, name, new BinaryConvertor());
}
return new RedisCommand(c, name);
}
    @Override
    public <T> T evalSha(String scriptSha, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
        // EVALSHA is not supported inside MULTI/pipeline blocks.
        if (isQueueing()) {
            throw new UnsupportedOperationException();
        }
        if (isPipelined()) {
            throw new UnsupportedOperationException();
        }

        RedisCommand<?> c = toCommand(returnType, "EVALSHA");
        List<Object> params = new ArrayList<Object>();
        params.add(scriptSha);
        params.add(numKeys);
        params.addAll(Arrays.asList(keysAndArgs));

        byte[] key = getKey(numKeys, keysAndArgs);
        return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
    }

    @Override
    public <T> T evalSha(byte[] scriptSha, ReturnType returnType, int numKeys, byte[]... keysAndArgs) {
        // NOTE(review): unlike the String overload above, this variant performs no
        // isQueueing()/isPipelined() check — confirm whether that is intentional.
        RedisCommand<?> c = toCommand(returnType, "EVALSHA");
        List<Object> params = new ArrayList<Object>();
        params.add(scriptSha);
        params.add(numKeys);
        params.addAll(Arrays.asList(keysAndArgs));

        byte[] key = getKey(numKeys, keysAndArgs);
        return write(key, ByteArrayCodec.INSTANCE, c, params.toArray());
    }
private static byte[] getKey(int numKeys, byte[][] keysAndArgs) {
if (numKeys > 0 && keysAndArgs.length > 0) {
return keysAndArgs[0];
}
return null;
}
    @Override
    public Long geoAdd(byte[] key, Point point, byte[] member) {
        // GEOADD key longitude latitude member.
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, key, point.getX(), point.getY(), member);
    }

    @Override
    public Long geoAdd(byte[] key, GeoLocation<byte[]> location) {
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, key, location.getPoint().getX(), location.getPoint().getY(), location.getName());
    }

    @Override
    public Long geoAdd(byte[] key, Map<byte[], Point> memberCoordinateMap) {
        // GEOADD key (longitude latitude member)... — one triple per map entry.
        List<Object> params = new ArrayList<Object>(memberCoordinateMap.size()*3 + 1);
        params.add(key);
        for (Entry<byte[], Point> entry : memberCoordinateMap.entrySet()) {
            params.add(entry.getValue().getX());
            params.add(entry.getValue().getY());
            params.add(entry.getKey());
        }
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, params.toArray());
    }

    @Override
    public Long geoAdd(byte[] key, Iterable<GeoLocation<byte[]>> locations) {
        // GEOADD key (longitude latitude member)... — one triple per location.
        List<Object> params = new ArrayList<Object>();
        params.add(key);
        for (GeoLocation<byte[]> location : locations) {
            params.add(location.getPoint().getX());
            params.add(location.getPoint().getY());
            params.add(location.getName());
        }
        return write(key, StringCodec.INSTANCE, RedisCommands.GEOADD, params.toArray());
    }
    @Override
    public Long geoDist(byte[] key, byte[] member1, byte[] member2) {
        // Default unit is meters, mirroring Redis' GEODIST default.
        return geoDist(key, member1, member2, DistanceUnit.METERS);
    }

    @Override
    public Distance geoDist(byte[] key, byte[] member1, byte[] member2, Metric metric) {
        // GEODIST key member1 member2 unit — result wrapped in the requested metric.
        return read(key, DoubleCodec.INSTANCE, new RedisCommand<Distance>("GEODIST", new DistanceConvertor(metric)), key, member1, member2, getAbbreviation(metric));
    }
private static final RedisCommand<List<Object>> GEOHASH = new RedisCommand<List<Object>>("GEOHASH", new ObjectListReplayDecoder<Object>());
@Override
public List<String> geoHash(byte[] key, byte[]... members) {
List<Object> params = new ArrayList<Object>(members.length + 1);
params.add(key);
for (byte[] member : members) {
params.add(member);
}
return read(key, StringCodec.INSTANCE, GEOHASH, params.toArray());
}
    private final MultiDecoder<Map<Object, Object>> geoDecoder = new ListMultiDecoder2(new ObjectListReplayDecoder2(), new PointDecoder());

    @Override
    public List<Point> geoPos(byte[] key, byte[]... members) {
        // GEOPOS key member [member ...] — coordinates decoded into Points.
        List<Object> params = new ArrayList<Object>(members.length + 1);
        params.add(key);
        params.addAll(Arrays.asList(members));

        RedisCommand<Map<Object, Object>> command = new RedisCommand<Map<Object, Object>>("GEOPOS", geoDecoder);
        return read(key, StringCodec.INSTANCE, command, params.toArray());
    }

    // Renders a coordinate in plain decimal form; scientific notation
    // (e.g. "1.0E-4") is not accepted by Redis as a command argument.
    private String convert(double longitude) {
        return BigDecimal.valueOf(longitude).toPlainString();
    }
    // NOTE(review): field name has a typo ("postition"); kept as-is because it is
    // referenced from several methods.
    private final MultiDecoder<GeoResults<GeoLocation<byte[]>>> postitionDecoder = new ListMultiDecoder2(new GeoResultsDecoder(), new CodecDecoder(), new PointDecoder(), new ObjectListReplayDecoder());

    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadius(byte[] key, Circle within) {
        // GEORADIUS_RO — read-only variant, routable to replicas.
        RedisCommand<GeoResults<GeoLocation<byte[]>>> command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", new GeoResultsDecoder());
        return read(key, ByteArrayCodec.INSTANCE, command, key,
                        convert(within.getCenter().getX()), convert(within.getCenter().getY()),
                        within.getRadius().getValue(), getAbbreviation(within.getRadius().getMetric()));
    }

    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadius(byte[] key, Circle within, GeoRadiusCommandArgs args) {
        List<Object> params = new ArrayList<Object>();
        params.add(key);
        params.add(convert(within.getCenter().getX()));
        params.add(convert(within.getCenter().getY()));
        params.add(within.getRadius().getValue());
        params.add(getAbbreviation(within.getRadius().getMetric()));

        // The reply decoder depends on whether coordinates or distances are
        // requested, so the command object is built per call.
        RedisCommand<GeoResults<GeoLocation<byte[]>>> command;
        if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", postitionDecoder);
            params.add("WITHCOORD");
        } else {
            MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(within.getRadius().getMetric()), new GeoDistanceDecoder());
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUS_RO", distanceDecoder);
            params.add("WITHDIST");
        }

        if (args.getLimit() != null) {
            params.add("COUNT");
            params.add(args.getLimit());
        }
        if (args.getSortDirection() != null) {
            params.add(args.getSortDirection().name());
        }

        return read(key, ByteArrayCodec.INSTANCE, command, params.toArray());
    }
private String getAbbreviation(Metric metric) {
if (ObjectUtils.nullSafeEquals(Metrics.NEUTRAL, metric)) {
return DistanceUnit.METERS.getAbbreviation();
}
return metric.getAbbreviation();
}
    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, double radius) {
        // Default unit is meters.
        return geoRadiusByMember(key, member, new Distance(radius, DistanceUnit.METERS));
    }

    private static final RedisCommand<GeoResults<GeoLocation<byte[]>>> GEORADIUSBYMEMBER = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", new GeoResultsDecoder());

    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, Distance radius) {
        // GEORADIUSBYMEMBER_RO — read-only variant, routable to replicas.
        return read(key, ByteArrayCodec.INSTANCE, GEORADIUSBYMEMBER, key, member, radius.getValue(), getAbbreviation(radius.getMetric()));
    }

    @Override
    public GeoResults<GeoLocation<byte[]>> geoRadiusByMember(byte[] key, byte[] member, Distance radius,
            GeoRadiusCommandArgs args) {
        List<Object> params = new ArrayList<Object>();
        params.add(key);
        params.add(member);
        params.add(radius.getValue());
        params.add(getAbbreviation(radius.getMetric()));

        // Same decoder selection as geoRadius(key, within, args): coordinates
        // and distances need different reply decoders.
        RedisCommand<GeoResults<GeoLocation<byte[]>>> command;
        if (args.getFlags().contains(GeoRadiusCommandArgs.Flag.WITHCOORD)) {
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", postitionDecoder);
            params.add("WITHCOORD");
        } else {
            MultiDecoder<GeoResults<GeoLocation<byte[]>>> distanceDecoder = new ListMultiDecoder2(new GeoResultsDecoder(radius.getMetric()), new GeoDistanceDecoder());
            command = new RedisCommand<GeoResults<GeoLocation<byte[]>>>("GEORADIUSBYMEMBER_RO", distanceDecoder);
            params.add("WITHDIST");
        }

        if (args.getLimit() != null) {
            params.add("COUNT");
            params.add(args.getLimit());
        }
        if (args.getSortDirection() != null) {
            params.add(args.getSortDirection().name());
        }

        return read(key, ByteArrayCodec.INSTANCE, command, params.toArray());
    }
    @Override
    public Long geoRemove(byte[] key, byte[]... members) {
        // Geo indexes are stored as sorted sets, so removal is plain ZREM.
        return zRem(key, members);
    }
private static final RedisCommand<Long> PFADD = new RedisCommand<Long>("PFADD");
@Override
public Long pfAdd(byte[] key, byte[]... values) {
List<Object> params = new ArrayList<Object>(values.length + 1);
params.add(key);
for (byte[] member : values) {
params.add(member);
}
return write(key, StringCodec.INSTANCE, PFADD, params.toArray());
}
@Override
public Long pfCount(byte[]... keys) {
Assert.notEmpty(keys, "PFCOUNT requires at least one non 'null' key.");
Assert.noNullElements(keys, "Keys for PFOUNT must not contain 'null'.");
return write(keys[0], StringCodec.INSTANCE, RedisCommands.PFCOUNT, Arrays.asList(keys).toArray());
}
    @Override
    public void pfMerge(byte[] destinationKey, byte[]... sourceKeys) {
        // PFMERGE dest source [source ...] — merge HyperLogLogs into dest.
        List<Object> args = new ArrayList<Object>(sourceKeys.length + 1);
        args.add(destinationKey);
        args.addAll(Arrays.asList(sourceKeys));
        write(destinationKey, StringCodec.INSTANCE, RedisCommands.PFMERGE, args.toArray());
    }

    private static final RedisCommand<Long> HSTRLEN = new RedisCommand<Long>("HSTRLEN");

    @Override
    public Long hStrLen(byte[] key, byte[] field) {
        // HSTRLEN key field — length of the value stored at the hash field.
        return read(key, StringCodec.INSTANCE, HSTRLEN, key, field);
    }
    @Override
    public RedisStreamCommands streamCommands() {
        // Stream (XADD/XREAD/...) support lives in a dedicated delegate.
        return new RedissonStreamCommands(this, executorService);
    }
private static final RedisStrictCommand<List<Object>> BITFIELD = new RedisStrictCommand<>("BITFIELD", new ObjectListReplayDecoder<>());
@Override
public List<Long> bitField(byte[] key, BitFieldSubCommands subCommands) {
List<Object> params = new ArrayList<>();
params.add(key);
boolean writeOp = false;
for (BitFieldSubCommands.BitFieldSubCommand subCommand : subCommands) {
String size = "u";
if (subCommand.getType().isSigned()) {
size = "i";
}
size += subCommand.getType().getBits();
String offset = "#";
if (subCommand.getOffset().isZeroBased()) {
offset = "";
}
offset += subCommand.getOffset().getValue();
if (subCommand instanceof BitFieldSubCommands.BitFieldGet) {
params.add("GET");
params.add(size);
params.add(offset);
} else if (subCommand instanceof BitFieldSubCommands.BitFieldSet) {
writeOp = true;
params.add("SET");
params.add(size);
params.add(offset);
params.add(((BitFieldSubCommands.BitFieldSet) subCommand).getValue());
} else if (subCommand instanceof BitFieldSubCommands.BitFieldIncrBy) {
writeOp = true;
params.add("INCRBY");
params.add(size);
params.add(offset);
params.add(((BitFieldSubCommands.BitFieldIncrBy) subCommand).getValue());
BitFieldSubCommands.BitFieldIncrBy.Overflow overflow = ((BitFieldSubCommands.BitFieldIncrBy) subCommand).getOverflow();
if (overflow != null) {
params.add("OVERFLOW");
params.add(overflow);
}
}
}
if (writeOp) {
return write(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
}
return read(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
}
@Override
public Long exists(byte[]... keys) {
return read(keys[0], StringCodec.INSTANCE, RedisCommands.EXISTS_LONG, Arrays.asList(keys).toArray());
}
@Override
public Long touch(byte[]... keys) {
return read(keys[0], StringCodec.INSTANCE, RedisCommands.TOUCH_LONG, Arrays.asList(keys).toArray());
}
    private static final RedisStrictCommand<ValueEncoding> OBJECT_ENCODING = new RedisStrictCommand<ValueEncoding>("OBJECT", "ENCODING", new Convertor<ValueEncoding>() {
        @Override
        public ValueEncoding convert(Object obj) {
            // Map the raw encoding name (e.g. "ziplist") to the enum.
            return ValueEncoding.of((String) obj);
        }
    });

    @Override
    public ValueEncoding encodingOf(byte[] key) {
        // OBJECT ENCODING key — internal representation used for the value.
        Assert.notNull(key, "Key must not be null!");

        return read(key, StringCodec.INSTANCE, OBJECT_ENCODING, key);
    }

    private static final RedisStrictCommand<Duration> OBJECT_IDLETIME = new RedisStrictCommand<>("OBJECT", "IDLETIME", new Convertor<Duration>() {
        @Override
        public Duration convert(Object obj) {
            // Redis reports idle time in seconds.
            return Duration.ofSeconds((Long)obj);
        }
    });

    @Override
    public Duration idletime(byte[] key) {
        // OBJECT IDLETIME key — time since the value was last accessed.
        Assert.notNull(key, "Key must not be null!");

        return read(key, StringCodec.INSTANCE, OBJECT_IDLETIME, key);
    }

    private static final RedisStrictCommand<Long> OBJECT_REFCOUNT = new RedisStrictCommand<Long>("OBJECT", "REFCOUNT");

    @Override
    public Long refcount(byte[] key) {
        // OBJECT REFCOUNT key — reference count of the stored value.
        Assert.notNull(key, "Key must not be null!");

        return read(key, StringCodec.INSTANCE, OBJECT_REFCOUNT, key);
    }
    private static final RedisStrictCommand<Long> BITPOS = new RedisStrictCommand<>("BITPOS");

    @Override
    public Long bitPos(byte[] key, boolean bit, org.springframework.data.domain.Range<Long> range) {
        // BITPOS key bit [start [end]] — position of the first bit set to
        // 0 or 1 within the (byte-indexed) range.
        Assert.notNull(key, "Key must not be null!");
        Assert.notNull(range, "Range must not be null! Use Range.unbounded() instead.");

        List<Object> params = new ArrayList<>();
        params.add(key);
        // Redis expects the bit as 0/1, not true/false.
        if (bit) {
            params.add(1);
        } else {
            params.add(0);
        }
        // BITPOS only accepts an end position when a start position is also
        // given, so an upper bound without a lower bound is silently dropped.
        if (range.getLowerBound().isBounded()) {
            params.add(range.getLowerBound().getValue().get());
            if (range.getUpperBound().isBounded()) {
                params.add(range.getUpperBound().getValue().get());
            }
        }

        return read(key, StringCodec.INSTANCE, BITPOS, params.toArray());
    }
    @Override
    public void restore(byte[] key, long ttlInMillis, byte[] serializedValue, boolean replace) {
        // RESTORE key ttl serialized-value [REPLACE] — recreate a key from a
        // DUMP payload; REPLACE overwrites an existing key instead of failing.
        if (replace) {
            write(key, StringCodec.INSTANCE, RedisCommands.RESTORE, key, ttlInMillis, serializedValue, "REPLACE");
            return;
        }

        restore(key, ttlInMillis, serializedValue);
    }
}
|
RedissonConnection
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/aot/DisabledInAotMode.java
|
{
"start": 993,
"end": 1123
}
|
class ____ <em>disabled</em>
* in Spring AOT (ahead-of-time) mode, which means that the {@code ApplicationContext}
* for the test
|
is
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/heuristic/NXYSignificanceHeuristic.java
|
{
"start": 3988,
"end": 4293
}
|
class ____ do contain term
frequencies.N11 = subsetFreq;
// documents that do not contain term
frequencies.N0_ = supersetSize - supersetFreq;
// documents that contain term
frequencies.N1_ = supersetFreq;
// documents that are not in
|
and
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MapBindingComponentProcessorTest.java
|
{
"start": 5901,
"end": 5975
}
|
enum ____ { INACCESSIBLE }",
"",
"
|
PackagePrivateEnum
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TestInstanceFactoryTests.java
|
{
"start": 21609,
"end": 21995
}
|
class ____ {
private final int number;
MultipleConstructorsTestCase(String text) {
this.number = -1;
}
MultipleConstructorsTestCase(int number) {
this.number = number;
}
@Test
void test() {
callSequence.add("test: " + this.number);
}
}
@SuppressWarnings("JUnitMalformedDeclaration")
@ExtendWith(FooInstanceFactory.class)
static
|
MultipleConstructorsTestCase
|
java
|
spring-projects__spring-boot
|
core/spring-boot-testcontainers/src/test/java/org/springframework/boot/testcontainers/lifecycle/TestcontainersLifecycleApplicationContextInitializerTests.java
|
{
"start": 1999,
"end": 8664
}
|
class ____ {
@BeforeEach
void setUp() {
TestcontainersConfiguration.getInstance().updateUserConfig("testcontainers.reuse.enable", "false");
}
@Test
void whenStartableBeanInvokesStartOnRefresh() {
Startable container = mock(Startable.class);
AnnotationConfigApplicationContext applicationContext = createApplicationContext(container);
then(container).shouldHaveNoInteractions();
applicationContext.refresh();
then(container).should().start();
applicationContext.close();
}
@Test
void whenStartableBeanInvokesCloseOnShutdown() {
Startable container = mock(Startable.class);
AnnotationConfigApplicationContext applicationContext = createApplicationContext(container);
applicationContext.refresh();
then(container).should(never()).close();
applicationContext.close();
then(container).should(times(1)).close();
}
@Test
void whenReusableContainerAndReuseEnabledBeanInvokesStartButNotClose() {
TestcontainersConfiguration.getInstance().updateUserConfig("testcontainers.reuse.enable", "true");
GenericContainer<?> container = mock(GenericContainer.class);
given(container.isShouldBeReused()).willReturn(true);
AnnotationConfigApplicationContext applicationContext = createApplicationContext(container);
then(container).shouldHaveNoInteractions();
applicationContext.refresh();
then(container).should().start();
applicationContext.close();
then(container).should(never()).close();
}
@Test
void whenReusableContainerButReuseNotEnabledBeanInvokesStartAndClose() {
GenericContainer<?> container = mock(GenericContainer.class);
given(container.isShouldBeReused()).willReturn(true);
AnnotationConfigApplicationContext applicationContext = createApplicationContext(container);
then(container).shouldHaveNoInteractions();
applicationContext.refresh();
then(container).should().start();
applicationContext.close();
then(container).should(times(1)).close();
}
@Test
void whenReusableContainerAndReuseEnabledBeanFromConfigurationInvokesStartButNotClose() {
TestcontainersConfiguration.getInstance().updateUserConfig("testcontainers.reuse.enable", "true");
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
applicationContext.register(ReusableContainerConfiguration.class);
applicationContext.refresh();
GenericContainer<?> container = applicationContext.getBean(GenericContainer.class);
then(container).should().start();
applicationContext.close();
then(container).should(never()).close();
}
@Test
void whenReusableContainerButReuseNotEnabledBeanFromConfigurationInvokesStartAndClose() {
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
applicationContext.register(ReusableContainerConfiguration.class);
applicationContext.refresh();
GenericContainer<?> container = applicationContext.getBean(GenericContainer.class);
then(container).should().start();
applicationContext.close();
then(container).should(times(1)).close();
}
@Test
void doesNotInitializeSameContextMoreThanOnce() {
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
int initialNumberOfPostProcessors = applicationContext.getBeanFactoryPostProcessors().size();
for (int i = 0; i < 10; i++) {
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
}
assertThat(applicationContext.getBeanFactoryPostProcessors()).hasSize(initialNumberOfPostProcessors + 1);
}
@Test
void dealsWithBeanCurrentlyInCreationException() {
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
applicationContext.register(BeanCurrentlyInCreationExceptionConfiguration2.class,
BeanCurrentlyInCreationExceptionConfiguration1.class);
applicationContext.refresh();
}
@Test
void doesNotStartContainersWhenAotProcessingIsInProgress() {
GenericContainer<?> container = mock(GenericContainer.class);
AnnotationConfigApplicationContext applicationContext = createApplicationContext(container);
then(container).shouldHaveNoInteractions();
withSystemProperty(AbstractAotProcessor.AOT_PROCESSING, "true",
() -> applicationContext.refreshForAotProcessing(new RuntimeHints()));
then(container).shouldHaveNoInteractions();
applicationContext.close();
}
@Test
void setupStartupBasedOnEnvironmentProperty() {
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
applicationContext.getEnvironment()
.getPropertySources()
.addLast(new MapPropertySource("test", Map.of("spring.testcontainers.beans.startup", "parallel")));
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
AbstractBeanFactory beanFactory = (AbstractBeanFactory) applicationContext.getBeanFactory();
BeanPostProcessor beanPostProcessor = beanFactory.getBeanPostProcessors()
.stream()
.filter(TestcontainersLifecycleBeanPostProcessor.class::isInstance)
.findFirst()
.get();
assertThat(beanPostProcessor).extracting("startup").isEqualTo(TestcontainersStartup.PARALLEL);
}
private void withSystemProperty(String name, String value, Runnable action) {
String previousValue = System.getProperty(name);
System.setProperty(name, value);
try {
action.run();
}
finally {
if (previousValue == null) {
System.clearProperty(name);
}
else {
System.setProperty(name, previousValue);
}
}
}
private AnnotationConfigApplicationContext createApplicationContext(Startable container) {
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
applicationContext.registerBean("container", Startable.class, () -> container);
return applicationContext;
}
private AnnotationConfigApplicationContext createApplicationContext(GenericContainer<?> container) {
AnnotationConfigApplicationContext applicationContext = new AnnotationConfigApplicationContext();
new TestcontainersLifecycleApplicationContextInitializer().initialize(applicationContext);
applicationContext.registerBean("container", GenericContainer.class, () -> container);
return applicationContext;
}
@Configuration
static
|
TestcontainersLifecycleApplicationContextInitializerTests
|
java
|
apache__flink
|
flink-rpc/flink-rpc-core/src/main/java/org/apache/flink/runtime/rpc/RpcEndpoint.java
|
{
"start": 12511,
"end": 18381
}
|
class ____ the self gateway type
* @param <C> type of the self gateway to create
* @return Self gateway of the specified type which can be used to issue asynchronous rpcs
*/
public <C extends RpcGateway> C getSelfGateway(Class<C> selfGatewayType) {
return rpcService.getSelfGateway(selfGatewayType, rpcServer);
}
/**
* Gets the address of the underlying RPC endpoint. The address should be fully qualified so
* that a remote system can connect to this RPC endpoint via this address.
*
* @return Fully qualified address of the underlying RPC endpoint
*/
@Override
public String getAddress() {
return rpcServer.getAddress();
}
/**
* Gets the hostname of the underlying RPC endpoint.
*
* @return Hostname on which the RPC endpoint is running
*/
@Override
public String getHostname() {
return rpcServer.getHostname();
}
/**
* Gets the main thread execution context. The main thread execution context can be used to
* execute tasks in the main thread of the underlying RPC endpoint.
*
* @return Main thread execution context
*/
protected MainThreadExecutor getMainThreadExecutor() {
return mainThreadExecutor;
}
/**
* Gets the main thread execution context. The main thread execution context can be used to
* execute tasks in the main thread of the underlying RPC endpoint.
*
* @param jobID the {@link JobID} to scope the returned {@link ComponentMainThreadExecutor} to,
* i.e. add/remove before/after the invocations using the returned executor
* @return Main thread execution context
*/
protected Executor getMainThreadExecutor(JobID jobID) {
// todo: consider caching
return MdcUtils.scopeToJob(jobID, getMainThreadExecutor());
}
/**
* Gets the endpoint's RPC service.
*
* @return The endpoint's RPC service
*/
public RpcService getRpcService() {
return rpcService;
}
/**
* Return a future which is completed with true when the rpc endpoint has been terminated. In
* case of a failure, this future is completed with the occurring exception.
*
* @return Future which is completed when the rpc endpoint has been terminated.
*/
public CompletableFuture<Void> getTerminationFuture() {
return rpcServer.getTerminationFuture();
}
// ------------------------------------------------------------------------
// Asynchronous executions
// ------------------------------------------------------------------------
/**
* Execute the runnable in the main thread of the underlying RPC endpoint.
*
* @param runnable Runnable to be executed in the main thread of the underlying RPC endpoint
*/
protected void runAsync(Runnable runnable) {
rpcServer.runAsync(runnable);
}
/**
* Execute the runnable in the main thread of the underlying RPC endpoint, with a delay of the
* given number of milliseconds.
*
* @param runnable Runnable to be executed
* @param delay The delay after which the runnable will be executed
*/
protected void scheduleRunAsync(Runnable runnable, Duration delay) {
scheduleRunAsync(runnable, delay.toMillis(), TimeUnit.MILLISECONDS);
}
/**
* Execute the runnable in the main thread of the underlying RPC endpoint, with a delay of the
* given number of milliseconds.
*
* @param runnable Runnable to be executed
* @param delay The delay after which the runnable will be executed
*/
protected void scheduleRunAsync(Runnable runnable, long delay, TimeUnit unit) {
rpcServer.scheduleRunAsync(runnable, unit.toMillis(delay));
}
/**
* Execute the callable in the main thread of the underlying RPC service, returning a future for
* the result of the callable. If the callable is not completed within the given timeout, then
* the future will be failed with a {@link TimeoutException}.
*
* @param callable Callable to be executed in the main thread of the underlying rpc server
* @param timeout Timeout for the callable to be completed
* @param <V> Return type of the callable
* @return Future for the result of the callable.
*/
protected <V> CompletableFuture<V> callAsync(Callable<V> callable, Duration timeout) {
return rpcServer.callAsync(callable, timeout);
}
// ------------------------------------------------------------------------
// Main Thread Validation
// ------------------------------------------------------------------------
/**
* Validates that the method call happens in the RPC endpoint's main thread.
*
* <p><b>IMPORTANT:</b> This check only happens when assertions are enabled, such as when
* running tests.
*
* <p>This can be used for additional checks, like
*
* <pre>{@code
* protected void concurrencyCriticalMethod() {
* validateRunsInMainThread();
*
* // some critical stuff
* }
* }</pre>
*/
public void validateRunsInMainThread() {
assert MainThreadValidatorUtil.isRunningInExpectedThread(currentMainThread.get());
}
/**
* Validate whether all the resources are closed.
*
* @return true if all the resources are closed, otherwise false
*/
boolean validateResourceClosed() {
return mainThreadExecutor.validateScheduledExecutorClosed() && resourceRegistry.isClosed();
}
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/** Executor which executes runnables in the main thread context. */
protected static
|
of
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/fn/MvMinBytesRefsFromOrdsBlockLoader.java
|
{
"start": 945,
"end": 1604
}
|
class ____ extends AbstractBytesRefsFromOrdsBlockLoader {
private final String fieldName;
public MvMinBytesRefsFromOrdsBlockLoader(String fieldName) {
super(fieldName);
this.fieldName = fieldName;
}
@Override
protected AllReader singletonReader(SortedDocValues docValues) {
return new Singleton(docValues);
}
@Override
protected AllReader sortedSetReader(SortedSetDocValues docValues) {
return new MvMinSortedSet(docValues);
}
@Override
public String toString() {
return "MvMinBytesRefsFromOrds[" + fieldName + "]";
}
private static
|
MvMinBytesRefsFromOrdsBlockLoader
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/MustBeClosedCheckerTest.java
|
{
"start": 11814,
"end": 14406
}
|
interface ____ {
@MustBeClosed
MustBeClosedAnnotatedConstructor getResource();
}
void consumeCloseable(ResourceFactory factory) {
try (Closeable c = factory.getResource()) {}
}
void expressionLambdaReturningCloseable() {
consumeCloseable(() -> new MustBeClosedAnnotatedConstructor());
}
void statementLambdaReturningCloseable() {
consumeCloseable(
() -> {
return new MustBeClosedAnnotatedConstructor();
});
}
void methodReferenceReturningCloseable() {
consumeCloseable(MustBeClosedAnnotatedConstructor::new);
}
void ternaryFunctionalExpressionReturningCloseable(boolean condition) {
consumeCloseable(
condition
? () -> new MustBeClosedAnnotatedConstructor()
: MustBeClosedAnnotatedConstructor::new);
}
void inferredFunctionalExpressionReturningCloseable(ResourceFactory factory) {
ImmutableList.of(
factory,
() -> new MustBeClosedAnnotatedConstructor(),
MustBeClosedAnnotatedConstructor::new)
.forEach(this::consumeCloseable);
}
@MustBeClosed
<C extends AutoCloseable> C mustBeClosed(C c) {
return c;
}
void closedByDecorator() throws IOException {
try (var in = new BufferedInputStream(mustBeClosed(nullInputStream()))) {}
try (var out = new BufferedOutputStream(mustBeClosed(nullOutputStream()))) {}
try (var in = new BufferedInputStream(mustBeClosed(nullInputStream()), 1024)) {}
try (var out = new BufferedOutputStream(mustBeClosed(nullOutputStream()), 1024)) {}
try (var r = new InputStreamReader(mustBeClosed(nullInputStream()))) {}
try (var w = new OutputStreamWriter(mustBeClosed(nullOutputStream()))) {}
try (var r = new BufferedReader(mustBeClosed(nullReader()))) {}
try (var w = new BufferedWriter(mustBeClosed(nullWriter()))) {}
}
}
""")
.doTest();
}
@Test
public void refactoring() {
refactoringHelper
.addInputLines("MustBeClosedCheckerPositiveCases.java", POSITIVE_CASES)
.addOutputLines(
"MustBeClosedCheckerPositiveCases_expected.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import static java.io.OutputStream.nullOutputStream;
import com.google.errorprone.annotations.MustBeClosed;
import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.function.Supplier;
import java.util.stream.Stream;
import java.util.zip.GZIPOutputStream;
@SuppressWarnings({"UnusedNestedClass", "UnusedVariable"})
|
ResourceFactory
|
java
|
apache__dubbo
|
dubbo-test/dubbo-test-check/src/main/java/org/apache/dubbo/test/check/registrycenter/processor/FindPidWindowsProcessor.java
|
{
"start": 1650,
"end": 5620
}
|
class ____ extends ZookeeperWindowsProcessor {
private static final Logger logger = LoggerFactory.getLogger(FindPidWindowsProcessor.class);
@Override
protected void doProcess(ZookeeperWindowsContext context) throws DubboTestException {
for (int clientPort : context.getClientPorts()) {
this.findPid(context, clientPort);
}
}
/**
* Find the pid of zookeeper instance.
*
* @param context the global context.
* @param clientPort the client port of zookeeper instance.
*/
private void findPid(ZookeeperWindowsContext context, int clientPort) {
logger.info(String.format("Find the pid of the zookeeper with port %d", clientPort));
Executor executor = new DefaultExecutor();
executor.setExitValues(null);
ByteArrayOutputStream out = new ByteArrayOutputStream();
ByteArrayOutputStream ins = new ByteArrayOutputStream();
ByteArrayInputStream in = new ByteArrayInputStream(ins.toByteArray());
executor.setStreamHandler(new PumpStreamHandler(out, null, in));
CommandLine cmdLine = new CommandLine("cmd.exe");
cmdLine.addArgument("/c");
cmdLine.addArgument("netstat -ano | findstr " + clientPort);
try {
executor.execute(cmdLine);
String result = out.toString();
logger.info(String.format("Find result: %s", result));
if (StringUtils.isNotEmpty(result)) {
String[] values = result.split("\\r\\n");
// values sample:
// Protocol Local address Foreign address Status PID
// TCP 127.0.0.1:2182 127.0.0.1:56672 ESTABLISHED 4020
// TCP 127.0.0.1:56672 127.0.0.1:2182 ESTABLISHED 1980
// TCP 127.0.0.1:56692 127.0.0.1:2182 ESTABLISHED 1980
// TCP 127.0.0.1:56723 127.0.0.1:2182 ESTABLISHED 1980
// TCP [::]:2182 [::]:0 LISTENING 4020
if (values != null && values.length > 0) {
for (int i = 0; i < values.length; i++) {
List<String> segments = Arrays.stream(values[i].trim().split(" "))
.filter(str -> !"".equals(str))
.collect(Collectors.toList());
// segments sample:
// TCP
// 127.0.0.1:2182
// 127.0.0.1:56672
// ESTABLISHED
// 4020
if (segments != null && segments.size() == 5) {
if (this.check(segments.get(1), clientPort)) {
int pid = Integer.valueOf(
segments.get(segments.size() - 1).trim());
context.register(clientPort, pid);
return;
}
}
}
}
}
} catch (IOException e) {
throw new DubboTestException(
String.format("Failed to find the PID of zookeeper with port %d", clientPort), e);
}
}
/**
* Checks if segment is valid ip and port pair.
*
* @param segment the segment to check
* @param clientPort the client port of zookeeper instance
* @return {@code true} if segment is valid pair of ip and port, otherwise {@code false}
*/
private boolean check(String segment, int clientPort) {
return ("[::]:" + clientPort).equalsIgnoreCase(segment)
|| ("0.0.0.0:" + clientPort).equalsIgnoreCase(segment)
|| ("127.0.0.1:" + clientPort).equalsIgnoreCase(segment);
}
}
|
FindPidWindowsProcessor
|
java
|
netty__netty
|
transport-native-io_uring/src/test/java/io/netty/channel/uring/IoUringSocketShutdownOutputBySelfTest.java
|
{
"start": 972,
"end": 1335
}
|
class ____ extends SocketShutdownOutputBySelfTest {
@BeforeAll
public static void loadJNI() {
assumeTrue(IoUring.isAvailable());
}
@Override
protected List<TestsuitePermutation.BootstrapFactory<Bootstrap>> newFactories() {
return IoUringSocketTestPermutation.INSTANCE.clientSocket();
}
}
|
IoUringSocketShutdownOutputBySelfTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
|
{
"start": 20619,
"end": 30717
}
|
enum ____ {
NULL(0, "NULL"),
QUIT(3, "SIGQUIT"),
KILL(9, "SIGKILL"),
TERM(15, "SIGTERM");
private final int value;
private final String str;
private Signal(int value, String str) {
this.str = str;
this.value = value;
}
/**
* Get the signal number.
* @return the signal number
*/
public int getValue() {
return value;
}
@Override
public String toString() {
return str;
}
}
/**
* Log each line of the output string as INFO level log messages.
*
* @param output the output string to log
*/
protected void logOutput(String output) {
String shExecOutput = output;
if (shExecOutput != null) {
for (String str : shExecOutput.split("\n")) {
LOG.info(str);
}
}
}
/**
* Get the pidFile of the container.
*
* @param containerId the container ID
* @return the path of the pid-file for the given containerId.
*/
protected Path getPidFilePath(ContainerId containerId) {
return this.pidFiles.get(containerId);
}
/**
* Return a command line to execute the given command in the OS shell.
* On Windows, the {code}groupId{code} parameter can be used to launch
* and associate the given GID with a process group. On
* non-Windows hosts, the {code}groupId{code} parameter is ignored.
*
* @param command the command to execute
* @param groupId the job owner's GID
* @param userName the job owner's username
* @param pidFile the path to the container's PID file
* @param config the configuration
* @return the command line to execute
*/
protected String[] getRunCommand(String command, String groupId,
String userName, Path pidFile, Configuration config) {
return getRunCommand(command, groupId, userName, pidFile, config, null);
}
/**
* Return a command line to execute the given command in the OS shell.
* On Windows, the {code}groupId{code} parameter can be used to launch
* and associate the given GID with a process group. On
* non-Windows hosts, the {code}groupId{code} parameter is ignored.
*
* @param command the command to execute
* @param groupId the job owner's GID for Windows. On other operating systems
* it is ignored.
* @param userName the job owner's username for Windows. On other operating
* systems it is ignored.
* @param pidFile the path to the container's PID file on Windows. On other
* operating systems it is ignored.
* @param config the configuration
* @param resource on Windows this parameter controls memory and CPU limits.
* If null, no limits are set. On other operating systems it is ignored.
* @return the command line to execute
*/
protected String[] getRunCommand(String command, String groupId,
String userName, Path pidFile, Configuration config, Resource resource) {
if (Shell.WINDOWS) {
return getRunCommandForWindows(command, groupId, userName, pidFile,
config, resource);
} else {
return getRunCommandForOther(command, config);
}
}
/**
* Return a command line to execute the given command in the OS shell.
* The {code}groupId{code} parameter can be used to launch
* and associate the given GID with a process group.
*
* @param command the command to execute
* @param groupId the job owner's GID
* @param userName the job owner's username
* @param pidFile the path to the container's PID file
* @param config the configuration
* @param resource this parameter controls memory and CPU limits.
* If null, no limits are set.
* @return the command line to execute
*/
protected String[] getRunCommandForWindows(String command, String groupId,
String userName, Path pidFile, Configuration config, Resource resource) {
int cpuRate = -1;
int memory = -1;
if (resource != null) {
if (config.getBoolean(
YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
YarnConfiguration.
DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) {
memory = (int) resource.getMemorySize();
}
if (config.getBoolean(
YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED,
YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) {
int containerVCores = resource.getVirtualCores();
int nodeVCores = NodeManagerHardwareUtils.getVCores(config);
int nodeCpuPercentage =
NodeManagerHardwareUtils.getNodeCpuPercentage(config);
float containerCpuPercentage =
(float)(nodeCpuPercentage * containerVCores) / nodeVCores;
// CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
// should be set as 20 * 100.
cpuRate = Math.min(10000, (int)(containerCpuPercentage * 100));
}
}
return new String[] {
Shell.getWinUtilsPath(),
"task",
"create",
"-m",
String.valueOf(memory),
"-c",
String.valueOf(cpuRate),
groupId,
"cmd /c " + command
};
}
/**
* Return a command line to execute the given command in the OS shell.
*
* @param command the command to execute
* @param config the configuration
* @return the command line to execute
*/
protected String[] getRunCommandForOther(String command,
Configuration config) {
List<String> retCommand = new ArrayList<>();
boolean containerSchedPriorityIsSet = false;
int containerSchedPriorityAdjustment =
YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;
if (config.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) !=
null) {
containerSchedPriorityIsSet = true;
containerSchedPriorityAdjustment = config
.getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,
YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY);
}
if (containerSchedPriorityIsSet) {
retCommand.addAll(Arrays.asList("nice", "-n",
Integer.toString(containerSchedPriorityAdjustment)));
}
retCommand.addAll(Arrays.asList("bash", command));
return retCommand.toArray(new String[retCommand.size()]);
}
/**
* Return whether the container is still active.
*
* @param containerId the target container's ID
* @return true if the container is active
*/
protected boolean isContainerActive(ContainerId containerId) {
return this.pidFiles.containsKey(containerId);
}
@VisibleForTesting
protected String getNMEnvVar(String varname) {
return System.getenv(varname);
}
/**
* Mark the container as active.
*
* @param containerId the container ID
* @param pidFilePath the path where the executor should write the PID
* of the launched process
*/
public void activateContainer(ContainerId containerId, Path pidFilePath) {
this.pidFiles.put(containerId, pidFilePath);
}
// LinuxContainerExecutor overrides this method and behaves differently.
public String[] getIpAndHost(Container container)
throws ContainerExecutionException {
return getLocalIpAndHost(container);
}
// ipAndHost[0] contains ip.
// ipAndHost[1] contains hostname.
public static String[] getLocalIpAndHost(Container container) {
String[] ipAndHost = new String[2];
try {
InetAddress address = InetAddress.getLocalHost();
ipAndHost[0] = address.getHostAddress();
ipAndHost[1] = address.getHostName();
} catch (UnknownHostException e) {
LOG.error("Unable to get Local hostname and ip for {}", container
.getContainerId(), e);
}
return ipAndHost;
}
/**
* Mark the container as inactive. For inactive containers this
* method has no effect.
*
* @param containerId the container ID
*/
public void deactivateContainer(ContainerId containerId) {
this.pidFiles.remove(containerId);
}
/**
* Pause the container. The default implementation is to raise a kill event.
* Specific executor implementations can override this behavior.
* @param container
* the Container
*/
public void pauseContainer(Container container) {
LOG.warn("{} doesn't support pausing.", container.getContainerId());
throw new UnsupportedOperationException();
}
/**
* Resume the container from pause state. The default implementation ignores
* this event. Specific implementations can override this behavior.
* @param container
* the Container
*/
public void resumeContainer(Container container) {
LOG.warn("{} doesn't support resume.", container.getContainerId());
throw new UnsupportedOperationException();
}
/**
* Perform any cleanup before the next launch of the container.
* @param container container
*/
public void cleanupBeforeRelaunch(Container container)
throws IOException, InterruptedException {
if (container.getLocalizedResources() != null) {
Map<Path, Path> symLinks = resolveSymLinks(
container.getLocalizedResources(), container.getUser());
for (Map.Entry<Path, Path> symLink : symLinks.entrySet()) {
LOG.debug("{} deleting {}", container.getContainerId(),
symLink.getValue());
deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(container.getUser())
.setSubDir(symLink.getValue())
.build());
}
}
}
/**
* Get the process-identifier for the container.
*
* @param containerID the container ID
* @return the process ID of the container if it has already launched,
* or null otherwise
*/
public String getProcessId(ContainerId containerID) {
String pid = null;
Path pidFile = pidFiles.get(containerID);
// If PID is null, this container hasn't launched yet.
if (pidFile != null) {
try {
pid = ProcessIdFileReader.getProcessId(pidFile);
} catch (IOException e) {
LOG.error("Got exception reading pid from pid-file {}", pidFile, e);
}
}
return pid;
}
/**
* This
|
Signal
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/sql/FlinkCurrentDateDynamicFunction.java
|
{
"start": 1369,
"end": 2476
}
|
class ____ extends SqlCurrentDateFunction {
private final boolean isBatchMode;
public FlinkCurrentDateDynamicFunction(boolean isBatchMode) {
this.isBatchMode = isBatchMode;
}
@Override
public boolean isDynamicFunction() {
return isBatchMode && super.isDynamicFunction();
}
@Override
public boolean isDeterministic() {
// be a non-deterministic function in streaming mode
return isBatchMode;
}
@Override
public boolean equals(@Nullable Object obj) {
if (!(obj instanceof FlinkCurrentDateDynamicFunction)) {
return false;
}
if (!obj.getClass().equals(this.getClass())) {
return false;
}
FlinkCurrentDateDynamicFunction other = (FlinkCurrentDateDynamicFunction) obj;
return this.getName().equals(other.getName())
&& kind == other.kind
&& this.isBatchMode == other.isBatchMode;
}
@Override
public int hashCode() {
return Objects.hash(kind, this.getName(), isBatchMode);
}
}
|
FlinkCurrentDateDynamicFunction
|
java
|
apache__camel
|
dsl/camel-xml-io-dsl/src/test/java/org/apache/camel/dsl/xml/io/beans/MyFacHelper.java
|
{
"start": 854,
"end": 1001
}
|
class ____ {
public static MyFacBean createBean(String field1, String field2) {
return new MyFacBean(field1, field2);
}
}
|
MyFacHelper
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configuration/WebSecurityConfigurationTests.java
|
{
"start": 29047,
"end": 30167
}
|
class ____ {
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE)
public SecurityFilterChain notAuthorized(HttpSecurity http) throws Exception {
// @formatter:off
http
.securityMatchers((requests) -> requests.requestMatchers(pathPattern("/user")))
.authorizeHttpRequests((requests) -> requests.anyRequest().hasRole("USER"));
// @formatter:on
return http.build();
}
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE + 1)
public SecurityFilterChain path1(HttpSecurity http) throws Exception {
// @formatter:off
http
.securityMatchers((requests) -> requests.requestMatchers(pathPattern("/admin")))
.authorizeHttpRequests((requests) -> requests.anyRequest().hasRole("ADMIN"));
// @formatter:on
return http.build();
}
@Bean
@Order(Ordered.LOWEST_PRECEDENCE)
public SecurityFilterChain permitAll(HttpSecurity http) throws Exception {
http.authorizeHttpRequests((requests) -> requests.anyRequest().permitAll());
return http.build();
}
}
@Configuration
@EnableWebSecurity
@EnableWebMvc
@Import(AuthenticationTestConfiguration.class)
static
|
MultipleSecurityFilterChainConfig
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/mapping/UnidirectionalOneToManyIndexColumnTest.java
|
{
"start": 2539,
"end": 3062
}
|
class ____ {
@Id
@GeneratedValue
private int id;
@OneToMany(targetEntity = Child.class, cascade = CascadeType.ALL)
@OrderColumn(name = "position")
private List<Child> children = new ArrayList<>();
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public List<Child> getChildren() {
return children;
}
public void setChildren(List<Child> children) {
this.children = children;
}
}
@Entity( name = "Child" )
@Table(name = "CHILD")
public static
|
Parent
|
java
|
apache__kafka
|
trogdor/src/main/java/org/apache/kafka/trogdor/rest/CreateWorkerRequest.java
|
{
"start": 1065,
"end": 1729
}
|
class ____ extends Message {
private final long workerId;
private final String taskId;
private final TaskSpec spec;
@JsonCreator
public CreateWorkerRequest(@JsonProperty("workerId") long workerId,
@JsonProperty("taskId") String taskId,
@JsonProperty("spec") TaskSpec spec) {
this.workerId = workerId;
this.taskId = taskId;
this.spec = spec;
}
@JsonProperty
public long workerId() {
return workerId;
}
@JsonProperty
public String taskId() {
return taskId;
}
@JsonProperty
public TaskSpec spec() {
return spec;
}
}
|
CreateWorkerRequest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java
|
{
"start": 1391,
"end": 1690
}
|
class ____ {
public static final String ACTION_NAME = "internal:admin/repository/verify";
public static final ActionType<ActionResponse.Empty> TYPE = new ActionType<>(ACTION_NAME);
// no construction
private VerifyNodeRepositoryAction() {}
public static
|
VerifyNodeRepositoryAction
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/io/PackageSanityTests.java
|
{
"start": 1013,
"end": 1437
}
|
class ____ extends AbstractPackageSanityTests {
public PackageSanityTests() {
setDefault(BaseEncoding.class, BaseEncoding.base64());
setDefault(int.class, 32);
setDefault(String.class, "abcd");
setDefault(Method.class, AbstractPackageSanityTests.class.getDeclaredMethods()[0]);
setDefault(MapMode.class, MapMode.READ_ONLY);
setDefault(CharsetEncoder.class, UTF_8.newEncoder());
}
}
|
PackageSanityTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
|
{
"start": 36357,
"end": 36460
}
|
interface ____ {
void count(String partition, Resource resource);
}
@FunctionalInterface
|
Counter
|
java
|
apache__maven
|
impl/maven-impl/src/main/java/org/apache/maven/impl/StaxLocation.java
|
{
"start": 944,
"end": 2313
}
|
class ____ implements Location {
private final javax.xml.stream.Location location;
public static Location getLocation(Exception e) {
return toLocation(e instanceof XMLStreamException xe ? xe.getLocation() : null);
}
public static Location toLocation(javax.xml.stream.Location location) {
return location != null ? new StaxLocation(location) : null;
}
public static String getMessage(Exception e) {
String message = e.getMessage();
if (e instanceof XMLStreamException xe && xe.getLocation() != null) {
int idx = message.indexOf("\nMessage: ");
if (idx >= 0) {
return message.substring(idx + "\nMessage: ".length());
}
}
return message;
}
public StaxLocation(javax.xml.stream.Location location) {
this.location = location;
}
@Override
public int getLineNumber() {
return location.getLineNumber();
}
@Override
public int getColumnNumber() {
return location.getColumnNumber();
}
@Override
public int getCharacterOffset() {
return location.getCharacterOffset();
}
@Override
public String getPublicId() {
return location.getPublicId();
}
@Override
public String getSystemId() {
return location.getSystemId();
}
}
|
StaxLocation
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelActionRequestTests.java
|
{
"start": 861,
"end": 2183
}
|
class ____ extends AbstractWireSerializingTestCase<Request> {
@Override
protected Request createTestInstance() {
String modelId = randomAlphaOfLength(10);
return new Request(
TrainedModelConfigTests.createTestInstance(modelId, false)
.setParsedDefinition(TrainedModelDefinitionTests.createSmallRandomBuilder())
.build(),
randomBoolean(),
randomBoolean()
);
}
@Override
protected Request mutateInstance(Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<Request> instanceReader() {
return (in) -> {
Request request = new Request(in);
request.getTrainedModelConfig().ensureParsedDefinition(xContentRegistry());
return request;
};
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(new MlInferenceNamedXContentProvider().getNamedXContentParsers());
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(new MlInferenceNamedXContentProvider().getNamedWriteables());
}
}
|
PutTrainedModelActionRequestTests
|
java
|
apache__flink
|
flink-python/src/test/java/org/apache/flink/table/runtime/utils/PassThroughStreamGroupWindowAggregatePythonFunctionRunner.java
|
{
"start": 1887,
"end": 3749
}
|
class ____
extends BeamTablePythonFunctionRunner {
private final PassThroughPythonStreamGroupWindowAggregateOperator operator;
public PassThroughStreamGroupWindowAggregatePythonFunctionRunner(
Environment environment,
String taskName,
ProcessPythonEnvironmentManager environmentManager,
RowType inputType,
RowType outputType,
String functionUrn,
FlinkFnApi.UserDefinedAggregateFunctions userDefinedFunctions,
FlinkMetricContainer flinkMetricContainer,
KeyedStateBackend keyedStateBackend,
TypeSerializer keySerializer,
PassThroughPythonStreamGroupWindowAggregateOperator operator) {
super(
environment,
taskName,
environmentManager,
functionUrn,
userDefinedFunctions,
flinkMetricContainer,
keyedStateBackend,
keySerializer,
null,
null,
0.0,
createFlattenRowTypeCoderInfoDescriptorProto(
inputType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false),
createFlattenRowTypeCoderInfoDescriptorProto(
outputType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false));
this.operator = operator;
}
@Override
protected void startBundle() {
super.startBundle();
this.operator.setResultBuffer(resultBuffer);
this.mainInputReceiver = input -> operator.processPythonElement(input.getValue());
}
@Override
public JobBundleFactory createJobBundleFactory(Struct pipelineOptions) {
return PythonTestUtils.createMockJobBundleFactory();
}
}
|
PassThroughStreamGroupWindowAggregatePythonFunctionRunner
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java
|
{
"start": 1533,
"end": 4391
}
|
class ____ extends ValuesSourceAggregationBuilder.MetricsAggregationBuilder<StatsAggregationBuilder> {
public static final String NAME = "stats";
public static final ValuesSourceRegistry.RegistryKey<MetricAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(
NAME,
MetricAggregatorSupplier.class
);
public static final ObjectParser<StatsAggregationBuilder, String> PARSER = ObjectParser.fromBuilder(NAME, StatsAggregationBuilder::new);
static {
ValuesSourceAggregationBuilder.declareFields(PARSER, true, true, false);
}
public StatsAggregationBuilder(String name) {
super(name);
}
protected StatsAggregationBuilder(
StatsAggregationBuilder clone,
AggregatorFactories.Builder factoriesBuilder,
Map<String, Object> metadata
) {
super(clone, factoriesBuilder, metadata);
}
public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
StatsAggregatorFactory.registerAggregators(builder);
}
@Override
protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) {
return new StatsAggregationBuilder(this, factoriesBuilder, metadata);
}
/**
* Read from a stream.
*/
public StatsAggregationBuilder(StreamInput in) throws IOException {
super(in);
}
@Override
public boolean supportsSampling() {
return true;
}
@Override
public Set<String> metricNames() {
return InternalStats.METRIC_NAMES;
}
@Override
protected ValuesSourceType defaultValueSourceType() {
return CoreValuesSourceType.NUMERIC;
}
@Override
protected void innerWriteTo(StreamOutput out) {
// Do nothing, no extra state to write to stream
}
@Override
protected StatsAggregatorFactory innerBuild(
AggregationContext context,
ValuesSourceConfig config,
AggregatorFactory parent,
AggregatorFactories.Builder subFactoriesBuilder
) throws IOException {
MetricAggregatorSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
return new StatsAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier);
}
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
return builder;
}
@Override
public String getType() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
@Override
public Optional<Set<String>> getOutputFieldNames() {
return Optional.of(InternalStats.METRIC_NAMES);
}
}
|
StatsAggregationBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBGeoEvaluator.java
|
{
"start": 4715,
"end": 5308
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory wkb;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory wkb) {
this.source = source;
this.wkb = wkb;
}
@Override
public StXMaxFromWKBGeoEvaluator get(DriverContext context) {
return new StXMaxFromWKBGeoEvaluator(source, wkb.get(context), context);
}
@Override
public String toString() {
return "StXMaxFromWKBGeoEvaluator[" + "wkb=" + wkb + "]";
}
}
}
|
Factory
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/StubRelation.java
|
{
"start": 1339,
"end": 3407
}
|
class ____ extends LeafPlan {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
LogicalPlan.class,
"StubRelation",
StubRelation::new
);
private final List<Attribute> output;
public StubRelation(Source source, List<Attribute> output) {
super(source);
this.output = output;
}
/*
* The output of a StubRelation must also include any synthetic attributes referenced by the source plan (union types is a great
* example of those attributes that has some special treatment throughout the planning phases, especially in the EsRelation).
*/
public static List<Attribute> computeOutput(LogicalPlan source, LogicalPlan target) {
Set<Attribute> stubRelationOutput = new LinkedHashSet<>(target.output());
stubRelationOutput.addAll(source.references().stream().filter(Attribute::synthetic).toList());
return new ArrayList<>(stubRelationOutput);
}
public StubRelation(StreamInput in) throws IOException {
this(Source.readFrom((PlanStreamInput) in), emptyList());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Source.EMPTY.writeTo(out);
}
@Override
public List<Attribute> output() {
return output;
}
@Override
public boolean expressionsResolved() {
return true;
}
@Override
protected NodeInfo<StubRelation> info() {
return NodeInfo.create(this, StubRelation::new, output);
}
@Override
public int hashCode() {
return Objects.hash(StubRelation.class, output);
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
StubRelation other = (StubRelation) obj;
return Objects.equals(output, other.output());
}
}
|
StubRelation
|
java
|
apache__rocketmq
|
proxy/src/test/java/org/apache/rocketmq/proxy/service/message/ClusterMessageServiceTest.java
|
{
"start": 1844,
"end": 3478
}
|
class ____ {
private TopicRouteService topicRouteService;
private ClusterMessageService clusterMessageService;
@Before
public void before() {
this.topicRouteService = mock(TopicRouteService.class);
MQClientAPIFactory mqClientAPIFactory = mock(MQClientAPIFactory.class);
this.clusterMessageService = new ClusterMessageService(this.topicRouteService, mqClientAPIFactory);
}
@Test
public void testAckMessageByInvalidBrokerNameHandle() throws Exception {
when(topicRouteService.getBrokerAddr(any(), anyString())).thenThrow(new MQClientException(ResponseCode.TOPIC_NOT_EXIST, ""));
try {
this.clusterMessageService.ackMessage(
ProxyContext.create(),
ReceiptHandle.builder()
.startOffset(0L)
.retrieveTime(System.currentTimeMillis())
.invisibleTime(3000)
.reviveQueueId(1)
.topicType(ReceiptHandle.NORMAL_TOPIC)
.brokerName("notExistBroker")
.queueId(0)
.offset(123)
.commitLogOffset(0L)
.build(),
MessageClientIDSetter.createUniqID(),
new AckMessageRequestHeader(),
3000);
fail();
} catch (Exception e) {
assertTrue(e instanceof ProxyException);
ProxyException proxyException = (ProxyException) e;
assertEquals(ProxyExceptionCode.INVALID_RECEIPT_HANDLE, proxyException.getCode());
}
}
}
|
ClusterMessageServiceTest
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webmvc/src/test/java/org/springframework/cloud/gateway/server/mvc/TokenRelayFilterFunctionsTests.java
|
{
"start": 2483,
"end": 6059
}
|
class ____ {
private final List<HttpMessageConverter<?>> converters = new HttpMessageConverters().getConverters();
private MockHttpServletRequest request;
private OAuth2AuthorizedClientManager authorizedClientManager;
private WebApplicationContext applicationContext;
private HandlerFilterFunction<ServerResponse, ServerResponse> filter;
@BeforeEach
@SuppressWarnings("unchecked")
public void init() {
request = MockMvcRequestBuilders.get("/hello").buildRequest(new MockServletContext());
authorizedClientManager = mock(OAuth2AuthorizedClientManager.class);
applicationContext = mock(WebApplicationContext.class);
request.setAttribute(DispatcherServlet.WEB_APPLICATION_CONTEXT_ATTRIBUTE, applicationContext);
when(applicationContext.getBean(OAuth2AuthorizedClientManager.class)).thenReturn(authorizedClientManager);
filter = TokenRelayFilterFunctions.tokenRelay();
}
@Test
public void emptyPrincipal() throws Exception {
filter.filter(ServerRequest.create(request, converters), req -> {
assertThat(req.headers().asHttpHeaders().containsHeader(HttpHeaders.AUTHORIZATION)).isFalse();
return null;
});
}
@Test
public void whenPrincipalExistsAuthorizationHeaderAdded() throws Exception {
OAuth2AccessToken accessToken = mock(OAuth2AccessToken.class);
when(accessToken.getTokenValue()).thenReturn("mytoken");
ClientRegistration clientRegistration = ClientRegistration.withRegistrationId("myregistrationid")
.authorizationGrantType(AuthorizationGrantType.CLIENT_CREDENTIALS)
.clientId("myclientid")
.tokenUri("mytokenuri")
.build();
OAuth2AuthorizedClient authorizedClient = new OAuth2AuthorizedClient(clientRegistration, "joe", accessToken);
when(authorizedClientManager.authorize(any(OAuth2AuthorizeRequest.class))).thenReturn(authorizedClient);
OAuth2AuthenticationToken authenticationToken = new OAuth2AuthenticationToken(mock(OAuth2User.class),
Collections.emptyList(), "myId");
request.setUserPrincipal(authenticationToken);
filter.filter(ServerRequest.create(request, converters), req -> {
assertThat(req.headers().firstHeader(HttpHeaders.AUTHORIZATION)).isEqualTo("Bearer mytoken");
return null;
});
}
@Test
public void whenDefaultClientRegistrationIdProvidedAuthorizationHeaderAdded() throws Exception {
OAuth2AccessToken accessToken = mock(OAuth2AccessToken.class);
when(accessToken.getTokenValue()).thenReturn("mytoken");
ClientRegistration clientRegistration = ClientRegistration.withRegistrationId("myregistrationid")
.authorizationGrantType(AuthorizationGrantType.CLIENT_CREDENTIALS)
.clientId("myclientid")
.tokenUri("mytokenuri")
.build();
OAuth2AuthorizedClient authorizedClient = new OAuth2AuthorizedClient(clientRegistration, "joe", accessToken);
when(authorizedClientManager.authorize(any(OAuth2AuthorizeRequest.class))).thenReturn(authorizedClient);
request.setUserPrincipal(new TestingAuthenticationToken("my", null));
filter = TokenRelayFilterFunctions.tokenRelay("myId");
filter.filter(ServerRequest.create(request, converters), req -> {
assertThat(req.headers().firstHeader(HttpHeaders.AUTHORIZATION)).isEqualTo("Bearer mytoken");
return null;
});
}
@Test
public void principalIsNotOAuth2AuthenticationToken() throws Exception {
request.setUserPrincipal(new TestingAuthenticationToken("my", null));
filter.filter(ServerRequest.create(request, converters), req -> {
assertThat(req.headers().asHttpHeaders().containsHeader(HttpHeaders.AUTHORIZATION)).isFalse();
return null;
});
}
}
|
TokenRelayFilterFunctionsTests
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_11.java
|
{
"start": 1208,
"end": 2547
}
|
class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "select * from users where uid = :uid";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
SQLSelectStatement selectStmt = (SQLSelectStatement) stmt;
SQLSelect select = selectStmt.getSelect();
assertNotNull(select.getQuery());
MySqlSelectQueryBlock queryBlock = (MySqlSelectQueryBlock) select.getQuery();
assertNull(queryBlock.getOrderBy());
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
assertEquals(1, visitor.getTables().size());
assertEquals(2, visitor.getColumns().size());
assertEquals(1, visitor.getConditions().size());
assertEquals(0, visitor.getOrderByColumns().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("users")));
String output = SQLUtils.toMySqlString(stmt);
assertEquals("SELECT *" +
"\nFROM users" +
"\nWHERE uid = :uid", //
output);
}
}
|
MySqlSelectTest_11
|
java
|
quarkusio__quarkus
|
integration-tests/spring-data-jpa/src/test/java/io/quarkus/it/spring/data/jpa/MovieResourceIT.java
|
{
"start": 125,
"end": 177
}
|
class ____ extends MovieResourceTest {
}
|
MovieResourceIT
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/BeanDefinition.java
|
{
"start": 1344,
"end": 3947
}
|
class ____ extends NoOutputDefinition<BeanDefinition> {
@XmlTransient
private Class<?> beanClass;
@XmlTransient
private Object bean;
@XmlAttribute
private String ref;
@XmlAttribute
private String method;
@XmlAttribute
private String beanType;
@XmlAttribute
@Metadata(label = "advanced", defaultValue = "Singleton", enums = "Singleton,Request,Prototype")
private String scope;
public BeanDefinition() {
}
protected BeanDefinition(BeanDefinition source) {
super(source);
this.beanClass = source.beanClass;
this.bean = source.bean;
this.ref = source.ref;
this.method = source.method;
this.beanType = source.beanType;
this.scope = source.scope;
}
public BeanDefinition(String ref) {
this.ref = ref;
}
public BeanDefinition(String ref, String method) {
this.ref = ref;
this.method = method;
}
@Override
public BeanDefinition copyDefinition() {
return new BeanDefinition(this);
}
@Override
public String toString() {
return "Bean[" + description() + "]";
}
public String description() {
if (ref != null) {
String methodText = "";
if (method != null) {
methodText = " method:" + method;
}
return "ref:" + ref + methodText;
} else if (bean != null) {
return ObjectHelper.className(bean);
} else if (beanClass != null) {
return beanClass.getName();
} else if (beanType != null) {
return beanType;
} else {
return "";
}
}
@Override
public String getShortName() {
return "bean";
}
@Override
public String getLabel() {
return "bean[" + description() + "]";
}
public String getRef() {
return ref;
}
/**
* Sets a reference to an existing bean to use, which is looked up from the registry
*/
public void setRef(String ref) {
this.ref = ref;
}
public String getMethod() {
return method;
}
/**
* Sets the method name on the bean to use
*/
public void setMethod(String method) {
this.method = method;
}
/**
* Sets an existing instance of the bean to use
*/
public void setBean(Object bean) {
this.bean = bean;
}
public Object getBean() {
return bean;
}
public String getBeanType() {
return beanType;
}
/**
* Sets the
|
BeanDefinition
|
java
|
apache__maven
|
impl/maven-core/src/main/java/org/apache/maven/internal/impl/DefaultProjectBuilder.java
|
{
"start": 8219,
"end": 9034
}
|
class ____ implements ModelSource2 {
private final Source source;
SourceWrapper(Source source) {
this.source = source;
}
@Override
public InputStream getInputStream() throws IOException {
return source.openStream();
}
@Override
public String getLocation() {
return source.getLocation();
}
@Override
public ModelSource2 getRelatedSource(String relPath) {
Source rel = source.resolve(relPath);
return rel != null ? new SourceWrapper(rel) : null;
}
@Override
public URI getLocationURI() {
Path path = source.getPath();
return path != null ? path.toUri() : URI.create(source.getLocation());
}
}
}
|
SourceWrapper
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/NullArgumentForNonNullParameterTest.java
|
{
"start": 4523,
"end": 4956
}
|
class ____ {
void foo() {
// BUG: Diagnostic contains:
ArgumentCaptor.forClass(null);
}
}
""")
.doTest();
}
@Test
public void negativeNullMarkedComGoogleCommonButNullable() {
conservativeHelper
.addSourceLines(
"Foo.java",
"""
import com.google.common.collect.ImmutableSet;
|
Foo
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bool/Person.java
|
{
"start": 192,
"end": 1098
}
|
class ____ {
private Boolean married;
private Boolean engaged;
private YesNo divorced;
private YesNo widowed;
public Boolean isMarried() {
return married;
}
public void setMarried(Boolean married) {
this.married = married;
}
// START: please note: deliberately ordered, first getEngaged, then isEngaged.
public Boolean getEngaged() {
return engaged;
}
public Boolean isEngaged() {
return engaged != null && !engaged;
}
// END
public void setEngaged(Boolean engaged) {
this.engaged = engaged;
}
public YesNo getDivorced() {
return divorced;
}
public void setDivorced(YesNo divorced) {
this.divorced = divorced;
}
public YesNo getWidowed() {
return widowed;
}
public void setWidowed(YesNo widowed) {
this.widowed = widowed;
}
}
|
Person
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/postgresql/issues/Issue5760.java
|
{
"start": 400,
"end": 1095
}
|
class ____ {
@Test
public void test_parse_error_sql() {
for (String sql : new String[]{
"Vacuum verbose ",
"Vacuum verbose;",
"Vacuum verbose full",
"Vacuum verbose full;",
"Vacuum verbose; select a from b", "Vacuum verbose full"
+ ";"
+ "Vacuum verbose bbbb;",
}) {
SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.postgresql);
List<SQLStatement> statementList = parser.parseStatementList();
System.out.println("原始的sql===" + sql);
System.out.println("生成的sql===" + statementList);
}
}
}
|
Issue5760
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-tracing-opentelemetry/src/main/java/org/springframework/boot/micrometer/tracing/opentelemetry/testcontainers/otlp/OpenTelemetryTracingContainerConnectionDetailsFactory.java
|
{
"start": 2301,
"end": 2863
}
|
class ____
extends ContainerConnectionDetails<Container<?>> implements OtlpTracingConnectionDetails {
private OpenTelemetryTracingContainerConnectionDetails(ContainerConnectionSource<Container<?>> source) {
super(source);
}
@Override
public String getUrl(Transport transport) {
int port = switch (transport) {
case HTTP -> OTLP_HTTP_PORT;
case GRPC -> OTLP_GRPC_PORT;
};
return "http://%s:%d/v1/traces".formatted(getContainer().getHost(), getContainer().getMappedPort(port));
}
}
}
|
OpenTelemetryTracingContainerConnectionDetails
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/schemaupdate/foreignkeys/definition/ForeignKeyDefinitionSecondaryTableTest.java
|
{
"start": 1113,
"end": 1386
}
|
class ____ {
@Id
@GeneratedValue
private int id;
private String emailAddress;
@Column(name = "SECURITY_USERNAME", table = "User_details")
private String username;
@Column(name = "SECURITY_PASSWORD", table = "User_details")
private String password;
}
}
|
User
|
java
|
quarkusio__quarkus
|
integration-tests/maven/src/test/resources-filtered/projects/tests-in-jar/src/main/java/org/acme/HelloResource.java
|
{
"start": 164,
"end": 308
}
|
class ____ {
@GET
@Produces(MediaType.TEXT_PLAIN)
public String greeting() {
return "hello jar friends";
}
}
|
HelloResource
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/info/InfoEndpointTests.java
|
{
"start": 994,
"end": 1600
}
|
class ____ {
@Test
void info() {
InfoEndpoint endpoint = new InfoEndpoint(Arrays.asList((builder) -> builder.withDetail("key1", "value1"),
(builder) -> builder.withDetail("key2", "value2")));
Map<String, Object> info = endpoint.info();
assertThat(info).hasSize(2);
assertThat(info).containsEntry("key1", "value1");
assertThat(info).containsEntry("key2", "value2");
}
@Test
void infoWithNoContributorsProducesEmptyMap() {
InfoEndpoint endpoint = new InfoEndpoint(Collections.emptyList());
Map<String, Object> info = endpoint.info();
assertThat(info).isEmpty();
}
}
|
InfoEndpointTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/collections/ElementCollectionMapTest.java
|
{
"start": 1681,
"end": 1765
}
|
enum ____ {
LAND_LINE,
MOBILE
}
@Entity(name = "Person")
public static
|
PhoneType
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/MockitoUsageTest.java
|
{
"start": 4601,
"end": 5262
}
|
class ____ {
void test() {
Foo mock = mock(Foo.class);
// BUG: Diagnostic contains:
// Missing method call for verify(mock.execute()) here
// verify(mock).execute();
verify(mock.execute());
}
}
""")
.doTest();
}
@Test
public void positive_verify_noMethod() {
compilationHelper
.addSourceLines("Foo.java", FOO_SOURCE)
.addSourceLines(
"Test.java",
"""
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
|
Test
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
|
{
"start": 13264,
"end": 13789
}
|
class ____ extends NodeManager {
@Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
MockNodeStatusUpdater myNodeStatusUpdater =
new MockNodeStatusUpdater(context, dispatcher, healthChecker, metrics);
return myNodeStatusUpdater;
}
public void setMasterKey(MasterKey masterKey) {
getNMContext().getContainerTokenSecretManager().setMasterKey(masterKey);
}
}
}
|
TestNodeManager
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/config/ExplicitAuditingViaJavaConfigRepositoriesTests.java
|
{
"start": 1229,
"end": 1448
}
|
class ____ extends AbstractAuditingViaJavaConfigRepositoriesTests {
@Configuration
@EnableJpaAuditing(auditorAwareRef = "auditorProvider")
@Import(TestConfig.class)
static
|
ExplicitAuditingViaJavaConfigRepositoriesTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/fixes/SuggestedFixesTest.java
|
{
"start": 65487,
"end": 65842
}
|
class ____ {
Foo get() {
return null;
}
}
""")
.doTest();
}
@Test
public void renameClass_selfReferential() {
BugCheckerRefactoringTestHelper.newInstance(RenameClassChecker.class, getClass())
.addInputLines(
"Test.java",
"""
|
Foo
|
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/google/ListGenerators.java
|
{
"start": 3102,
"end": 3540
}
|
class ____ extends TestStringListGenerator {
@Override
protected List<String> create(String[] elements) {
String[] prefix = {"f", "g"};
String[] all = new String[elements.length + prefix.length];
arraycopy(prefix, 0, all, 0, 2);
arraycopy(elements, 0, all, 2, elements.length);
return ImmutableList.copyOf(all).subList(2, elements.length + 2);
}
}
public static
|
ImmutableListTailSubListGenerator
|
java
|
apache__spark
|
sql/api/src/main/java/org/apache/spark/sql/api/java/UDF4.java
|
{
"start": 980,
"end": 1094
}
|
interface ____<T1, T2, T3, T4, R> extends Serializable {
R call(T1 t1, T2 t2, T3 t3, T4 t4) throws Exception;
}
|
UDF4
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/BuiltinBean.java
|
{
"start": 27800,
"end": 28774
}
|
class ____ parameterized type"));
}
if (!ctx.injectionTarget.asBean().getProviderType().equals(interceptionProxyType)) {
String msg = ctx.injectionTarget.asBean().isProducerMethod()
? "Type argument of InterceptionProxy must be equal to the return type of the producer method"
: "Type argument of InterceptionProxy must be equal to the bean provider type";
ctx.errors.accept(new DefinitionException(msg));
}
ClassInfo clazz = getClassByName(ctx.beanDeployment.getBeanArchiveIndex(), interceptionProxyType.name());
if (clazz != null) {
if (clazz.isRecord()) {
ctx.errors.accept(new DefinitionException("Cannot build InterceptionProxy for a record"));
}
if (clazz.isSealed()) {
ctx.errors.accept(new DefinitionException("Cannot build InterceptionProxy for a sealed type"));
}
}
}
}
|
or
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
|
{
"start": 3086,
"end": 29737
}
|
class ____ extends ESAllocationTestCase {
private final ShardId shardId = new ShardId("test", "_na_", 0);
private final DiscoveryNode node1 = newNode("node1");
private final DiscoveryNode node2 = newNode("node2");
private final DiscoveryNode node3 = newNode("node3");
private TestAllocator testAllocator;
@Before
public void buildTestAllocator() {
this.testAllocator = new TestAllocator();
}
private void allocateAllUnassigned(final RoutingAllocation allocation) {
final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator();
while (iterator.hasNext()) {
testAllocator.allocateUnassigned(iterator.next(), allocation, iterator);
}
}
public void testNoProcessPrimaryNotAllocatedBefore() {
final RoutingAllocation allocation;
// with old version, we can't know if a shard was allocated before or not
allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
randomFrom(INDEX_CREATED, CLUSTER_RECOVERED, INDEX_REOPENED)
);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(false));
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when async fetch returns that there is no data, the shard will not be allocated.
*/
public void testNoAsyncFetchData() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
CLUSTER_RECOVERED,
"allocId"
);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests when the node returns that no data was found for it (null for allocation id),
* it will be moved to ignore unassigned.
*/
public void testNoAllocationFound() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
CLUSTER_RECOVERED,
"allocId"
);
testAllocator.addData(node1, null, randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests when the node returns data with a shard allocation id that does not match active allocation ids, it will be moved to ignore
* unassigned.
*/
public void testNoMatchingAllocationIdFound() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "id2");
testAllocator.addData(node1, "id1", randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests when the node returns that no data was found for it, it will be moved to ignore unassigned.
*/
public void testStoreException() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
CLUSTER_RECOVERED,
"allocId1"
);
testAllocator.addData(node1, "allocId1", randomBoolean(), new CorruptIndexException("test", "test"));
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when the node returns a ShardLockObtainFailedException, it will be considered as a valid shard copy
*/
public void testShardLockObtainFailedException() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
CLUSTER_RECOVERED,
"allocId1"
);
testAllocator.addData(node1, "allocId1", randomBoolean(), new ShardLockObtainFailedException(shardId, "test"));
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node1.getId())
);
// check that allocation id is reused
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).allocationId().getId(),
equalTo("allocId1")
);
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when one node returns a ShardLockObtainFailedException and another properly loads the store, it will
* select the second node as target
*/
public void testShardLockObtainFailedExceptionPreferOtherValidCopies() {
String allocId1 = randomAlphaOfLength(10);
String allocId2 = randomAlphaOfLength(10);
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
CLUSTER_RECOVERED,
allocId1,
allocId2
);
testAllocator.addData(node1, allocId1, randomBoolean(), new ShardLockObtainFailedException(shardId, "test"));
testAllocator.addData(node2, allocId2, randomBoolean(), null);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node2.getId())
);
// check that allocation id is reused
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).allocationId().getId(),
equalTo(allocId2)
);
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when there is a node to allocate the shard to, it will be allocated to it.
*/
public void testFoundAllocationAndAllocating() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED),
"allocId1"
);
testAllocator.addData(node1, "allocId1", randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node1.getId())
);
// check that allocation id is reused
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).allocationId().getId(),
equalTo("allocId1")
);
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when the nodes with prior copies of the given shard all return a decision of NO, but
* {@link AllocationDecider#canForceAllocatePrimary(ShardRouting, RoutingNode, RoutingAllocation)}
* returns a YES decision for at least one of those NO nodes, then we force allocate to one of them
*/
public void testForceAllocatePrimary() {
testAllocator.addData(node1, "allocId1", randomBoolean());
AllocationDeciders deciders = new AllocationDeciders(
Arrays.asList(
// since the deciders return a NO decision for allocating a shard (due to the guaranteed NO decision from the second
// decider),
// the allocator will see if it can force assign the primary, where the decision will be YES
new TestAllocateDecision(randomBoolean() ? Decision.YES : Decision.NO),
getNoDeciderThatAllowsForceAllocate()
)
);
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertTrue(allocation.routingNodes().unassigned().ignored().isEmpty());
assertEquals(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), 1);
assertEquals(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(), node1.getId());
}
/**
* Tests that when the nodes with prior copies of the given shard all return a decision of NO, and
* {@link AllocationDecider#canForceAllocatePrimary(ShardRouting, RoutingNode, RoutingAllocation)}
* returns a NO or THROTTLE decision for a node, then we do not force allocate to that node.
*/
public void testDontAllocateOnNoOrThrottleForceAllocationDecision() {
testAllocator.addData(node1, "allocId1", randomBoolean());
boolean forceDecisionNo = randomBoolean();
AllocationDeciders deciders = new AllocationDeciders(
Arrays.asList(
// since both deciders here return a NO decision for allocating a shard,
// the allocator will see if it can force assign the primary, where the decision will be either NO or THROTTLE,
// so the shard will remain un-initialized
new TestAllocateDecision(Decision.NO),
forceDecisionNo ? getNoDeciderThatDeniesForceAllocate() : getNoDeciderThatThrottlesForceAllocate()
)
);
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
assertEquals(ignored.size(), 1);
assertEquals(
ignored.get(0).unassignedInfo().lastAllocationStatus(),
forceDecisionNo ? AllocationStatus.DECIDERS_NO : AllocationStatus.DECIDERS_THROTTLED
);
assertTrue(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).isEmpty());
}
/**
* Tests that when the nodes with prior copies of the given shard return a THROTTLE decision,
* then we do not force allocate to that node but instead throttle.
*/
public void testDontForceAllocateOnThrottleDecision() {
testAllocator.addData(node1, "allocId1", randomBoolean());
AllocationDeciders deciders = new AllocationDeciders(
Arrays.asList(
// since we have a NO decision for allocating a shard (because the second decider returns a NO decision),
// the allocator will see if it can force assign the primary, and in this case,
// the TestAllocateDecision's decision for force allocating is to THROTTLE (using
// the default behavior) so despite the other decider's decision to return YES for
// force allocating the shard, we still THROTTLE due to the decision from TestAllocateDecision
new TestAllocateDecision(Decision.THROTTLE),
getNoDeciderThatAllowsForceAllocate()
)
);
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
assertEquals(ignored.size(), 1);
assertEquals(ignored.get(0).unassignedInfo().lastAllocationStatus(), AllocationStatus.DECIDERS_THROTTLED);
assertTrue(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).isEmpty());
}
/**
* Tests that when there was a node that previously had the primary, it will be allocated to that same node again.
*/
public void testPreferAllocatingPreviousPrimary() {
String primaryAllocId = UUIDs.randomBase64UUID();
String replicaAllocId = UUIDs.randomBase64UUID();
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
yesAllocationDeciders(),
randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED),
primaryAllocId,
replicaAllocId
);
boolean node1HasPrimaryShard = randomBoolean();
testAllocator.addData(node1, node1HasPrimaryShard ? primaryAllocId : replicaAllocId, node1HasPrimaryShard);
testAllocator.addData(node2, node1HasPrimaryShard ? replicaAllocId : primaryAllocId, node1HasPrimaryShard == false);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
DiscoveryNode allocatedNode = node1HasPrimaryShard ? node1 : node2;
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(allocatedNode.getId())
);
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when there is a node to allocate to, but it is throttling (and it is the only one),
* it will be moved to ignore unassigned until it can be allocated to.
*/
public void testFoundAllocationButThrottlingDecider() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
throttleAllocationDeciders(),
CLUSTER_RECOVERED,
"allocId1"
);
testAllocator.addData(node1, "allocId1", randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when there is a node to be allocated to, but it the decider said "no", we still
* force the allocation to it.
*/
public void testFoundAllocationButNoDecider() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(
noAllocationDeciders(),
CLUSTER_RECOVERED,
"allocId1"
);
testAllocator.addData(node1, "allocId1", randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(
shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node1.getId())
);
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when restoring from a snapshot and we find a node with a shard copy and allocation
* deciders say yes, we allocate to that node.
*/
public void testRestore() {
RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), randomLong(), "allocId");
testAllocator.addData(node1, "some allocId", randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
* Tests that when restoring from a snapshot and we find a node with a shard copy and allocation
* deciders say throttle, we add it to ignored shards.
*/
public void testRestoreThrottle() {
RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders(), randomLong(), "allocId");
testAllocator.addData(node1, "some allocId", randomBoolean());
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
 * Tests that when restoring from a snapshot and we find a node with a shard copy but allocation
 * deciders say no, we still allocate to that node.
 */
public void testRestoreForcesAllocateIfShardAvailable() {
    final long shardSize = randomNonNegativeLong();
    RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders(), shardSize, "allocId");
    testAllocator.addData(node1, "some allocId", randomBoolean());
    allocateAllUnassigned(allocation);
    assertThat(allocation.routingNodesChanged(), equalTo(true));
    assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
    final List<ShardRouting> initializingShards = shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING);
    assertThat(initializingShards.size(), equalTo(1));
    // The snapshot shard size must be carried over onto the initializing shard routing.
    assertThat(initializingShards.get(0).getExpectedShardSize(), equalTo(shardSize));
    assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
 * Tests that when restoring from a snapshot and we don't find a node with a shard copy, the shard will remain in
 * the unassigned list to be allocated later.
 */
public void testRestoreDoesNotAssignIfNoShardAvailable() {
    RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), randomNonNegativeLong(), "allocId");
    // A null allocation id means the node holds no copy of the shard at all.
    testAllocator.addData(node1, null, randomBoolean());
    allocateAllUnassigned(allocation);
    // Nothing changed: the shard is neither assigned nor ignored, just still unassigned.
    assertThat(allocation.routingNodesChanged(), equalTo(false));
    assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
    assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
    assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
 * Tests that when restoring from a snapshot and we don't know the shard size yet, the shard will remain in
 * the unassigned list to be allocated later.
 */
public void testRestoreDoesNotAssignIfShardSizeNotAvailable() {
    // A null shardSize simulates the snapshot shard size not having been fetched yet.
    RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), null, "allocId");
    testAllocator.addData(node1, null, false);
    allocateAllUnassigned(allocation);
    assertThat(allocation.routingNodesChanged(), equalTo(true));
    assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));
    ShardRouting ignoredRouting = allocation.routingNodes().unassigned().ignored().get(0);
    // The shard is parked with FETCHING_SHARD_DATA until the size becomes known.
    assertThat(ignoredRouting.unassignedInfo().lastAllocationStatus(), equalTo(AllocationStatus.FETCHING_SHARD_DATA));
    assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
}
/**
 * Builds a {@link RoutingAllocation} for a single-shard, zero-replica index that is being
 * restored from a freshly minted snapshot.
 *
 * @param allocationDeciders deciders consulted during allocation
 * @param shardSize          size reported for every shard by the {@link SnapshotShardSizeInfo}
 *                           override below; {@code null} simulates "size not fetched yet"
 * @param allocIds           in-sync allocation ids recorded for shard 0 of the index
 */
private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders, Long shardSize, String... allocIds) {
    Metadata metadata = Metadata.builder()
        .put(
            IndexMetadata.builder(shardId.getIndexName())
                .settings(settings(IndexVersion.current()))
                .numberOfShards(1)
                .numberOfReplicas(0)
                .putInSyncAllocationIds(0, Sets.newHashSet(allocIds))
        )
        .build();
    final Snapshot snapshot = new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID()));
    RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
        .addAsRestore(
            metadata.getProject().index(shardId.getIndex()),
            new SnapshotRecoverySource(
                UUIDs.randomBase64UUID(),
                snapshot,
                IndexVersion.current(),
                new IndexId(shardId.getIndexName(), UUIDs.randomBase64UUID(random()))
            )
        )
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(routingTable)
        .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3))
        .build();
    // ClusterInfo is passed as null (not needed by these tests); shard sizes instead come from
    // the anonymous SnapshotShardSizeInfo, which reports the supplied size for every shard.
    return new RoutingAllocation(allocationDeciders, state.mutableRoutingNodes(), state, null, new SnapshotShardSizeInfo(Map.of()) {
        @Override
        public Long getShardSize(ShardRouting shardRouting) {
            return shardSize;
        }
    }, System.nanoTime());
}
/**
 * Builds a {@link RoutingAllocation} for a single-shard, zero-replica index whose primary is
 * unassigned for the given {@code reason}.
 *
 * @param deciders            deciders consulted during allocation
 * @param reason              how the index joins the routing table; only INDEX_CREATED,
 *                            CLUSTER_RECOVERED and INDEX_REOPENED are supported
 * @param activeAllocationIds in-sync allocation ids recorded for the shard
 * @throws IllegalArgumentException for any other unassigned reason
 */
private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(
    AllocationDeciders deciders,
    UnassignedInfo.Reason reason,
    String... activeAllocationIds
) {
    Metadata metadata = Metadata.builder()
        .put(
            IndexMetadata.builder(shardId.getIndexName())
                .settings(settings(IndexVersion.current()))
                .numberOfShards(1)
                .numberOfReplicas(0)
                .putInSyncAllocationIds(shardId.id(), Sets.newHashSet(activeAllocationIds))
        )
        .build();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY);
    switch (reason) {
        case INDEX_CREATED -> routingTableBuilder.addAsNew(metadata.getProject().index(shardId.getIndex()));
        case CLUSTER_RECOVERED -> routingTableBuilder.addAsRecovery(metadata.getProject().index(shardId.getIndex()));
        case INDEX_REOPENED -> routingTableBuilder.addAsFromCloseToOpen(metadata.getProject().index(shardId.getIndex()));
        default -> throw new IllegalArgumentException("can't do " + reason + " for you. teach me");
    }
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .metadata(metadata)
        .routingTable(routingTableBuilder.build())
        .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3))
        .build();
    return new RoutingAllocation(
        deciders,
        state.mutableRoutingNodes(),
        state,
        ClusterInfo.EMPTY,
        SnapshotShardSizeInfo.EMPTY,
        System.nanoTime()
    );
}
/**
 * Recomputes cluster health from the (possibly mutated) routing nodes and asserts the result is
 * no worse than {@code expectedStatus}. The comparison uses enum ordinals, which assumes the
 * declaration order GREEN &lt; YELLOW &lt; RED in {@link ClusterHealthStatus} — confirm there.
 */
private void assertClusterHealthStatus(RoutingAllocation allocation, ClusterHealthStatus expectedStatus) {
    GlobalRoutingTable oldRoutingTable = allocation.globalRoutingTable();
    final GlobalRoutingTable newRoutingTable = oldRoutingTable.rebuild(allocation.routingNodes(), allocation.metadata());
    ClusterState clusterState = ClusterState.builder(new ClusterName("test-cluster")).routingTable(newRoutingTable).build();
    ClusterStateHealth clusterStateHealth = new ClusterStateHealth(
        clusterState,
        clusterState.metadata().getProject().getConcreteAllIndices(),
        clusterState.metadata().getProject().id()
    );
    assertThat(clusterStateHealth.getStatus().ordinal(), lessThanOrEqualTo(expectedStatus.ordinal()));
}
/** NO-for-allocation decider whose {@code canForceAllocatePrimary} answers YES. */
private AllocationDecider getNoDeciderThatAllowsForceAllocate() {
    return getNoDeciderWithForceAllocate(Decision.YES);
}
/** NO-for-allocation decider whose {@code canForceAllocatePrimary} answers THROTTLE. */
private AllocationDecider getNoDeciderThatThrottlesForceAllocate() {
    return getNoDeciderWithForceAllocate(Decision.THROTTLE);
}
/** NO-for-allocation decider whose {@code canForceAllocatePrimary} also answers NO. */
private AllocationDecider getNoDeciderThatDeniesForceAllocate() {
    return getNoDeciderWithForceAllocate(Decision.NO);
}
/**
 * Builds a decider that answers NO for regular allocation but returns the supplied
 * {@code forceAllocateDecision} when asked to force-allocate a primary.
 */
private AllocationDecider getNoDeciderWithForceAllocate(final Decision forceAllocateDecision) {
    return new TestAllocateDecision(Decision.NO) {
        @Override
        public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
            // Force-allocation only ever applies to primaries; guard against misuse in tests.
            assert shardRouting.primary() : "cannot force allocate a non-primary shard " + shardRouting;
            return forceAllocateDecision;
        }
    };
}
|
PrimaryShardAllocatorTests
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/Retryable.java
|
{
"start": 1098,
"end": 1624
}
|
/**
 * A single retriable unit of work producing a response of type {@code R}. Implementations
 * signal transient failures with {@link ExecutionException} (worth retrying) and permanent
 * ones with {@link UnretryableException} (give up immediately).
 *
 * @param <R> the type of the response data returned on success
 */
interface ____<R> {
    /**
     * Perform the operation and return the data from the response.
     *
     * @return Return response data, formatted in the given data type
     *
     * @throws ExecutionException Thrown on errors connecting, writing, reading, timeouts, etc.
     * that can likely be tried again
     * @throws UnretryableException Thrown on errors that we can determine should not be tried again
     */
    R call() throws ExecutionException, UnretryableException;
}
|
Retryable
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java
|
{
"start": 549,
"end": 661
}
|
/**
 * A Lucene {@link org.apache.lucene.search.QueryCache} that additionally supports bulk
 * invalidation and releases its resources on {@link Closeable#close()}.
 */
interface ____ extends Closeable, org.apache.lucene.search.QueryCache {
    /** Removes all cached entries; {@code reason} presumably describes why (e.g. for logging) — TODO confirm. */
    void clear(String reason);
}
|
QueryCache
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java
|
{
"start": 3068,
"end": 54069
}
|
class ____ extends ESTestCase {
// Happy path for removeAsType: each value comes back under its declared type and the
// consumed field is removed from the source map.
public void testRemoveAsTypeWithTheCorrectType() {
    Map<String, Object> map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 1.0));
    Integer i = ServiceUtils.removeAsType(map, "a", Integer.class);
    assertEquals(Integer.valueOf(5), i);
    assertNull(map.get("a")); // field has been removed
    String str = ServiceUtils.removeAsType(map, "b", String.class);
    assertEquals("a string", str);
    assertNull(map.get("b"));
    Boolean b = ServiceUtils.removeAsType(map, "c", Boolean.class);
    assertEquals(Boolean.TRUE, b);
    assertNull(map.get("c"));
    Double d = ServiceUtils.removeAsType(map, "d", Double.class);
    assertEquals(Double.valueOf(1.0), d);
    assertNull(map.get("d"));
    assertThat(map.entrySet(), empty());
}
// Type mismatches: the call throws an ElasticsearchStatusException naming field, value and
// target type — and the offending field is still consumed. Note there is deliberately no
// numeric coercion in either direction (double -> int and int -> double both fail).
public void testRemoveAsTypeWithInCorrectType() {
    Map<String, Object> map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 5.0, "e", 5));
    var e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.removeAsType(map, "a", String.class));
    assertThat(
        e.getMessage(),
        containsString("field [a] is not of the expected type. The value [5] cannot be converted to a [String]")
    );
    assertNull(map.get("a"));
    e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.removeAsType(map, "b", Boolean.class));
    assertThat(
        e.getMessage(),
        containsString("field [b] is not of the expected type. The value [a string] cannot be converted to a [Boolean]")
    );
    assertNull(map.get("b"));
    e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.removeAsType(map, "c", Integer.class));
    assertThat(
        e.getMessage(),
        containsString("field [c] is not of the expected type. The value [true] cannot be converted to a [Integer]")
    );
    assertNull(map.get("c"));
    // cannot convert double to integer
    e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.removeAsType(map, "d", Integer.class));
    assertThat(
        e.getMessage(),
        containsString("field [d] is not of the expected type. The value [5.0] cannot be converted to a [Integer]")
    );
    assertNull(map.get("d"));
    // cannot convert integer to double
    e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.removeAsType(map, "e", Double.class));
    assertThat(
        e.getMessage(),
        containsString("field [e] is not of the expected type. The value [5] cannot be converted to a [Double]")
    );
    assertNull(map.get("e"));
    assertThat(map.entrySet(), empty());
}
// An absent key must yield null and leave the remaining entries untouched.
public void testRemoveAsTypeMissingReturnsNull() {
    var source = new HashMap<String, Object>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE));
    var removed = ServiceUtils.removeAsType(source, "missing", Integer.class);
    assertNull(removed);
    assertThat(source.entrySet(), hasSize(3));
}
// Happy path for removeAsOneOfTypes: the value is returned (as Object, cast by the caller)
// when its runtime type matches any candidate in the list, and the field is consumed.
public void testRemoveAsOneOfTypes_Validation_WithCorrectTypes() {
    Map<String, Object> map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 1.0));
    ValidationException validationException = new ValidationException();
    Integer i = (Integer) ServiceUtils.removeAsOneOfTypes(map, "a", List.of(String.class, Integer.class), validationException);
    assertEquals(Integer.valueOf(5), i);
    assertNull(map.get("a")); // field has been removed
    String str = (String) ServiceUtils.removeAsOneOfTypes(map, "b", List.of(Integer.class, String.class), validationException);
    assertEquals("a string", str);
    assertNull(map.get("b"));
    Boolean b = (Boolean) ServiceUtils.removeAsOneOfTypes(map, "c", List.of(String.class, Boolean.class), validationException);
    assertEquals(Boolean.TRUE, b);
    assertNull(map.get("c"));
    // NOTE(review): Booleans.class (the utility class) looks like a slip for Boolean.class;
    // harmless here since the Double candidate matches, but worth confirming the intent.
    Double d = (Double) ServiceUtils.removeAsOneOfTypes(map, "d", List.of(Booleans.class, Double.class), validationException);
    assertEquals(Double.valueOf(1.0), d);
    assertNull(map.get("d"));
    assertThat(map.entrySet(), empty());
}
// When no candidate type matches, removeAsOneOfTypes returns null, records exactly one
// validation error listing the candidate types, and still consumes the field.
public void testRemoveAsOneOfTypes_Validation_WithIncorrectType() {
    Map<String, Object> map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 5.0, "e", 5));
    var validationException = new ValidationException();
    Object result = ServiceUtils.removeAsOneOfTypes(map, "a", List.of(String.class, Boolean.class), validationException);
    assertNull(result);
    assertThat(validationException.validationErrors(), hasSize(1));
    assertThat(
        validationException.validationErrors().get(0),
        containsString("field [a] is not of one of the expected types. The value [5] cannot be converted to one of [String, Boolean]")
    );
    assertNull(map.get("a"));
    validationException = new ValidationException();
    result = ServiceUtils.removeAsOneOfTypes(map, "b", List.of(Boolean.class, Integer.class), validationException);
    assertNull(result);
    assertThat(validationException.validationErrors(), hasSize(1));
    assertThat(
        validationException.validationErrors().get(0),
        containsString(
            "field [b] is not of one of the expected types. The value [a string] cannot be converted to one of [Boolean, Integer]"
        )
    );
    assertNull(map.get("b"));
    validationException = new ValidationException();
    result = ServiceUtils.removeAsOneOfTypes(map, "c", List.of(String.class, Integer.class), validationException);
    assertNull(result);
    assertThat(validationException.validationErrors(), hasSize(1));
    assertThat(
        validationException.validationErrors().get(0),
        containsString(
            "field [c] is not of one of the expected types. The value [true] cannot be converted to one of [String, Integer]"
        )
    );
    assertNull(map.get("c"));
    validationException = new ValidationException();
    result = ServiceUtils.removeAsOneOfTypes(map, "d", List.of(String.class, Boolean.class), validationException);
    assertNull(result);
    assertThat(validationException.validationErrors(), hasSize(1));
    assertThat(
        validationException.validationErrors().get(0),
        containsString("field [d] is not of one of the expected types. The value [5.0] cannot be converted to one of [String, Boolean]")
    );
    assertNull(map.get("d"));
    validationException = new ValidationException();
    result = ServiceUtils.removeAsOneOfTypes(map, "e", List.of(String.class, Boolean.class), validationException);
    assertNull(result);
    assertThat(validationException.validationErrors(), hasSize(1));
    assertThat(
        validationException.validationErrors().get(0),
        containsString("field [e] is not of one of the expected types. The value [5] cannot be converted to one of [String, Boolean]")
    );
    assertNull(map.get("e"));
    assertThat(map.entrySet(), empty());
}
// An absent key yields null and must not disturb the remaining entries.
public void testRemoveAsOneOfTypesMissingReturnsNull() {
    var source = new HashMap<String, Object>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE));
    var removed = ServiceUtils.removeAsOneOfTypes(source, "missing", List.of(Integer.class), new ValidationException());
    assertNull(removed);
    assertThat(source.entrySet(), hasSize(3));
}
// Parses a nested settings map into AdaptiveAllocationsSettings; a missing key yields null
// without adding errors, and min/max allocations are optional when disabled.
public void testRemoveAsAdaptiveAllocationsSettings() {
    Map<String, Object> map = new HashMap<>(
        Map.of("settings", new HashMap<>(Map.of("enabled", true, "min_number_of_allocations", 7, "max_number_of_allocations", 42)))
    );
    ValidationException validationException = new ValidationException();
    assertThat(
        ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "settings", validationException),
        equalTo(new AdaptiveAllocationsSettings(true, 7, 42))
    );
    assertThat(validationException.validationErrors(), empty());
    assertThat(ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "non-existent-key", validationException), nullValue());
    assertThat(validationException.validationErrors(), empty());
    map = new HashMap<>(Map.of("settings", new HashMap<>(Map.of("enabled", false))));
    assertThat(
        ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "settings", validationException),
        equalTo(new AdaptiveAllocationsSettings(false, null, null))
    );
    assertThat(validationException.validationErrors(), empty());
}
// Invalid nested settings are collected (not thrown): a wrongly typed "enabled", an unknown
// key, and a non-positive max each contribute exactly one validation error, in order.
public void testRemoveAsAdaptiveAllocationsSettings_exceptions() {
    Map<String, Object> map = new HashMap<>(
        Map.of("settings", new HashMap<>(Map.of("enabled", "YES!", "blah", 42, "max_number_of_allocations", -7)))
    );
    ValidationException validationException = new ValidationException();
    ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "settings", validationException);
    assertThat(validationException.validationErrors(), hasSize(3));
    assertThat(
        validationException.validationErrors().get(0),
        containsString("field [enabled] is not of the expected type. The value [YES!] cannot be converted to a [Boolean]")
    );
    assertThat(validationException.validationErrors().get(1), containsString("[settings] does not allow the setting [blah]"));
    assertThat(
        validationException.validationErrors().get(2),
        containsString("[max_number_of_allocations] must be a positive integer or null")
    );
}
// A well-formed url string must round-trip through convertToUri with no validation errors.
public void testConvertToUri_CreatesUri() {
    var errors = new ValidationException();
    var parsed = convertToUri("www.elastic.co", "name", "scope", errors);
    assertNotNull(parsed);
    assertThat(errors.validationErrors(), empty());
    assertThat(parsed.toString(), is("www.elastic.co"));
}
// convertToUri treats null input as "nothing to convert": no exception, no validation error.
public void testConvertToUri_DoesNotThrowNullPointerException_WhenPassedNull() {
    var validation = new ValidationException();
    var uri = convertToUri(null, "name", "scope", validation);
    assertNull(uri);
    assertTrue(validation.validationErrors().isEmpty());
}
// An unparsable url is reported through the ValidationException, scoped and naming the field.
public void testConvertToUri_AddsValidationError_WhenUrlIsInvalid() {
    var validation = new ValidationException();
    var uri = convertToUri("^^", "name", "scope", validation);
    assertNull(uri);
    assertThat(validation.validationErrors().size(), is(1));
    assertThat(validation.validationErrors().get(0), containsString("[scope] Invalid url [^^] received for field [name]"));
}
// The underlying parse failure reason must be carried verbatim into the validation message.
public void testConvertToUri_AddsValidationError_WhenUrlIsInvalid_PreservesReason() {
    var validation = new ValidationException();
    var uri = convertToUri("^^", "name", "scope", validation);
    assertNull(uri);
    assertThat(validation.validationErrors().size(), is(1));
    assertThat(
        validation.validationErrors().get(0),
        is("[scope] Invalid url [^^] received for field [name]. Error: unable to parse url [^^]. Reason: Illegal character in path")
    );
}
// createUri must parse a plain host string and preserve it verbatim.
public void testCreateUri_CreatesUri() {
    var expectedUrl = "www.elastic.co";
    var result = createUri(expectedUrl);
    assertNotNull(result);
    assertThat(result.toString(), is(expectedUrl));
}
// Malformed input must surface as an IllegalArgumentException naming the offending url.
public void testCreateUri_ThrowsException_WithInvalidUrl() {
    var thrown = expectThrows(IllegalArgumentException.class, () -> createUri("^^"));
    assertThat(thrown.getMessage(), containsString("unable to parse url [^^]"));
}
// Null input is a programmer error: createUri must fail fast with an NPE.
public void testCreateUri_ThrowsException_WithNullUrl() {
    assertNotNull(expectThrows(NullPointerException.class, () -> createUri(null)));
}
// extractOptionalUri: present-and-valid returns the parsed uri and records no errors.
public void testExtractOptionalUri_ReturnsUri_WhenFieldIsValid() {
    var validation = new ValidationException();
    Map<String, Object> map = Map.of("url", "www.elastic.co");
    var uri = ServiceUtils.extractOptionalUri(new HashMap<>(map), "url", validation);
    assertNotNull(uri);
    assertTrue(validation.validationErrors().isEmpty());
    assertThat(uri.toString(), is("www.elastic.co"));
}
// Optional means a missing field is simply null — no validation error.
public void testExtractOptionalUri_ReturnsNull_WhenFieldIsMissing() {
    var validation = new ValidationException();
    Map<String, Object> map = Map.of("other", "www.elastic.co");
    var uri = ServiceUtils.extractOptionalUri(new HashMap<>(map), "url", validation);
    assertNull(uri);
    assertTrue(validation.validationErrors().isEmpty());
}
// A present but unparsable url yields null plus a scoped validation error.
public void testExtractOptionalUri_ReturnsNullAndAddsValidationError_WhenFieldIsInvalid() {
    var validation = new ValidationException();
    Map<String, Object> map = Map.of("url", "^^");
    var uri = ServiceUtils.extractOptionalUri(new HashMap<>(map), "url", validation);
    assertNull(uri);
    assertThat(validation.validationErrors().size(), is(1));
    assertThat(validation.validationErrors().get(0), containsString("[service_settings] Invalid url [^^] received for field [url]"));
}
// Required secure string: a present value is returned as a SecureString and consumed.
public void testExtractRequiredSecureString_CreatesSecureString() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", "value"));
    var secureString = extractRequiredSecureString(map, "key", "scope", validation);
    assertTrue(validation.validationErrors().isEmpty());
    assertNotNull(secureString);
    assertThat(secureString.toString(), is("value"));
    assertTrue(map.isEmpty());
}
// A missing required setting adds a scoped error and leaves the map untouched.
public void testExtractRequiredSecureString_AddsException_WhenFieldDoesNotExist() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", "value"));
    var secureString = extractRequiredSecureString(map, "abc", "scope", validation);
    assertNull(secureString);
    assertFalse(validation.validationErrors().isEmpty());
    assertThat(map.size(), is(1));
    assertThat(validation.validationErrors().get(0), is("[scope] does not contain the required setting [abc]"));
}
// NOTE(review): despite the method name, this exercises extractOptionalString, not
// extractRequiredSecureString — presumably a copy-paste slip; confirm and align name or call.
public void testExtractRequiredSecureString_AddsException_WhenFieldIsEmpty() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", ""));
    var createdString = extractOptionalString(map, "key", "scope", validation);
    assertNull(createdString);
    assertFalse(validation.validationErrors().isEmpty());
    assertTrue(map.isEmpty());
    assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. [key] must be a non-empty string"));
}
// extractRequiredString: value returned, field consumed; the pre-seeded "previous error"
// confirms existing validation errors are left in place.
public void testExtractRequiredString_CreatesString() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("key", "value"));
    var createdString = extractRequiredString(map, "key", "scope", validation);
    assertThat(validation.validationErrors(), hasSize(1));
    assertNotNull(createdString);
    assertThat(createdString, is("value"));
    assertTrue(map.isEmpty());
}
// NOTE(review): method name says extractRequiredString but the body calls
// extractRequiredSecureString — confirm which helper this is meant to cover.
public void testExtractRequiredString_AddsException_WhenFieldDoesNotExist() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("key", "value"));
    var createdString = extractRequiredSecureString(map, "abc", "scope", validation);
    assertNull(createdString);
    assertThat(validation.validationErrors(), hasSize(2));
    assertThat(map.size(), is(1));
    assertThat(validation.validationErrors().get(1), is("[scope] does not contain the required setting [abc]"));
}
// NOTE(review): likewise, this calls extractOptionalString rather than extractRequiredString.
public void testExtractRequiredString_AddsException_WhenFieldIsEmpty() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("key", ""));
    var createdString = extractOptionalString(map, "key", "scope", validation);
    assertNull(createdString);
    assertFalse(validation.validationErrors().isEmpty());
    assertTrue(map.isEmpty());
    assertThat(validation.validationErrors().get(1), is("[scope] Invalid value empty string. [key] must be a non-empty string"));
}
// extractOptionalPositiveInteger: a positive value is returned; existing errors untouched.
public void testExtractOptionalPositiveInteger_returnsInteger_withPositiveInteger() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("abc", 1));
    assertEquals(Integer.valueOf(1), extractOptionalPositiveInteger(map, "abc", "scope", validation));
    assertThat(validation.validationErrors(), hasSize(1));
}
// Optional: a missing key yields null with no new error.
public void testExtractOptionalPositiveInteger_returnsNull_whenSettingNotFound() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("abc", 1));
    assertThat(extractOptionalPositiveInteger(map, "not_abc", "scope", validation), is(nullValue()));
    assertThat(validation.validationErrors(), hasSize(1));
}
// Non-integer values are rejected with a type-conversion error.
public void testExtractOptionalPositiveInteger_returnsNull_addsValidationError_whenObjectIsNotInteger() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    String setting = "abc";
    Map<String, Object> map = modifiableMap(Map.of(setting, "not_an_int"));
    assertThat(extractOptionalPositiveInteger(map, setting, "scope", validation), is(nullValue()));
    assertThat(validation.validationErrors(), hasSize(2));
    assertThat(validation.validationErrors().getLast(), containsString("cannot be converted to a [Integer]"));
}
// Zero and negative values both fail the "positive" check with a dedicated error message.
public void testExtractOptionalPositiveInteger_returnNull_addsValidationError_withNonPositiveInteger() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    String zeroKey = "zero";
    String negativeKey = "negative";
    Map<String, Object> map = modifiableMap(Map.of(zeroKey, 0, negativeKey, -1));
    // Test zero
    assertThat(extractOptionalPositiveInteger(map, zeroKey, "scope", validation), is(nullValue()));
    assertThat(validation.validationErrors(), hasSize(2));
    assertThat(validation.validationErrors().getLast(), containsString("[" + zeroKey + "] must be a positive integer"));
    // Test a negative number
    assertThat(extractOptionalPositiveInteger(map, negativeKey, "scope", validation), is(nullValue()));
    assertThat(validation.validationErrors(), hasSize(3));
    assertThat(validation.validationErrors().getLast(), containsString("[" + negativeKey + "] must be a positive integer"));
}
// extractOptionalInteger imposes no sign restriction: positive, zero and negative all pass.
public void testExtractOptionalInteger_returnsInteger() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    String positiveKey = "positive";
    int positiveValue = 123;
    String zeroKey = "zero";
    int zeroValue = 0;
    String negativeKey = "negative";
    int negativeValue = -123;
    Map<String, Object> map = modifiableMap(Map.of(positiveKey, positiveValue, zeroKey, zeroValue, negativeKey, negativeValue));
    assertThat(extractOptionalInteger(map, positiveKey, "scope", validation), is(positiveValue));
    assertThat(extractOptionalInteger(map, zeroKey, "scope", validation), is(zeroValue));
    assertThat(extractOptionalInteger(map, negativeKey, "scope", validation), is(negativeValue));
    assertThat(validation.validationErrors(), hasSize(1));
}
// A missing key yields null with no new error.
public void testExtractOptionalInteger_returnsNull_whenSettingNotFound() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("abc", 1));
    assertThat(extractOptionalInteger(map, "not_abc", "scope", validation), is(nullValue()));
    assertThat(validation.validationErrors(), hasSize(1));
}
// Non-integer values are rejected with a type-conversion error.
public void testExtractOptionalInteger_returnsNull_addsValidationError_whenObjectIsNotInteger() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    String setting = "abc";
    Map<String, Object> map = modifiableMap(Map.of(setting, "not_an_int"));
    assertThat(extractOptionalInteger(map, setting, "scope", validation), is(nullValue()));
    assertThat(validation.validationErrors(), hasSize(2));
    assertThat(validation.validationErrors().getLast(), containsString("cannot be converted to a [Integer]"));
}
// extractOptionalPositiveLong widens an Integer-valued entry to Long...
public void testExtractOptionalPositiveLong_IntegerValue() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("abc", 3));
    assertEquals(Long.valueOf(3), extractOptionalPositiveLong(map, "abc", "scope", validation));
    assertThat(validation.validationErrors(), hasSize(1));
}
// ...and also accepts values beyond Integer.MAX_VALUE.
public void testExtractOptionalPositiveLong() {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("abc", 4_000_000_000L));
    assertEquals(Long.valueOf(4_000_000_000L), extractOptionalPositiveLong(map, "abc", "scope", validation));
    assertThat(validation.validationErrors(), hasSize(1));
}
// extractRequiredPositiveIntegerBetween: both bounds are inclusive, so interior, min and max
// values all succeed; out-of-range values add an "Invalid value" error. The two private
// helpers below drive the success and failure directions respectively.
public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsBetweenMinAndMax() {
    var minValue = randomNonNegativeInt();
    var maxValue = randomIntBetween(minValue + 2, minValue + 10);
    testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, randomIntBetween(minValue + 1, maxValue - 1));
}
public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsEqualToMin() {
    var minValue = randomNonNegativeInt();
    var maxValue = randomIntBetween(minValue + 1, minValue + 10);
    testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, minValue);
}
public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsEqualToMax() {
    var minValue = randomNonNegativeInt();
    var maxValue = randomIntBetween(minValue + 1, minValue + 10);
    testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, maxValue);
}
// Asserts the in-range case: value returned, field consumed, no new validation error.
private void testExtractRequiredPositiveIntegerBetween_Successful(int minValue, int maxValue, int actualValue) {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("key", actualValue));
    var parsedInt = ServiceUtils.extractRequiredPositiveIntegerBetween(map, "key", minValue, maxValue, "scope", validation);
    assertThat(validation.validationErrors(), hasSize(1));
    assertNotNull(parsedInt);
    assertThat(parsedInt, is(actualValue));
    assertTrue(map.isEmpty());
}
public void testExtractRequiredIntBetween_AddsErrorForValueBelowMin() {
    var minValue = randomNonNegativeInt();
    var maxValue = randomIntBetween(minValue, minValue + 10);
    testExtractRequiredIntBetween_Unsuccessful(minValue, maxValue, minValue - 1);
}
public void testExtractRequiredIntBetween_AddsErrorForValueAboveMax() {
    var minValue = randomNonNegativeInt();
    var maxValue = randomIntBetween(minValue, minValue + 10);
    testExtractRequiredIntBetween_Unsuccessful(minValue, maxValue, maxValue + 1);
}
// Asserts the out-of-range case: null returned, field still consumed, one new error added.
private void testExtractRequiredIntBetween_Unsuccessful(int minValue, int maxValue, int actualValue) {
    var validation = new ValidationException();
    validation.addValidationError("previous error");
    Map<String, Object> map = modifiableMap(Map.of("key", actualValue));
    var parsedInt = ServiceUtils.extractRequiredPositiveIntegerBetween(map, "key", minValue, maxValue, "scope", validation);
    assertThat(validation.validationErrors(), hasSize(2));
    assertNull(parsedInt);
    assertTrue(map.isEmpty());
    assertThat(validation.validationErrors().get(1), containsString("Invalid value"));
}
// extractOptionalTimeValue: a missing key is simply null, no error recorded.
public void testExtractOptionalTimeValue_ReturnsNull_WhenKeyDoesNotExist() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", 1));
    var timeValue = extractOptionalTimeValue(map, "a", "scope", validation);
    assertNull(timeValue);
    assertTrue(validation.validationErrors().isEmpty());
}
// A well-formed duration string is parsed into a TimeValue and the field consumed.
public void testExtractOptionalTimeValue_CreatesTimeValue_Of3Seconds() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", "3s"));
    var timeValue = extractOptionalTimeValue(map, "key", "scope", validation);
    assertTrue(validation.validationErrors().isEmpty());
    assertNotNull(timeValue);
    assertThat(timeValue, is(TimeValue.timeValueSeconds(3)));
    assertTrue(map.isEmpty());
}
// An unknown unit suffix is rejected; the parser's own message is embedded in the error.
public void testExtractOptionalTimeValue_ReturnsNullAndAddsException_WhenTimeValueIsInvalid_InvalidUnit() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", "3abc"));
    var timeValue = extractOptionalTimeValue(map, "key", "scope", validation);
    assertFalse(validation.validationErrors().isEmpty());
    assertNull(timeValue);
    assertTrue(map.isEmpty());
    assertThat(
        validation.validationErrors().get(0),
        is(
            "[scope] Invalid time value [3abc]. [key] must be a valid time value string: failed to parse setting [key] "
                + "with value [3abc] as a time value: unit is missing or unrecognized"
        )
    );
}
// Negative durations are likewise rejected with the parser's reason preserved.
public void testExtractOptionalTimeValue_ReturnsNullAndAddsException_WhenTimeValueIsInvalid_NegativeNumber() {
    var validation = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", "-3d"));
    var timeValue = extractOptionalTimeValue(map, "key", "scope", validation);
    assertFalse(validation.validationErrors().isEmpty());
    assertNull(timeValue);
    assertTrue(map.isEmpty());
    assertThat(
        validation.validationErrors().get(0),
        is(
            "[scope] Invalid time value [-3d]. [key] must be a valid time value string: failed to parse setting [key] "
                + "with value [-3d] as a time value: negative durations are not supported"
        )
    );
}
// extractOptionalDoubleInRange: an in-range value is returned and the field consumed.
public void testExtractOptionalDouble_ExtractsAsDoubleInRange() {
    var validationException = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", 1.01));
    var result = ServiceUtils.extractOptionalDoubleInRange(map, "key", 0.0, 2.0, "test_scope", validationException);
    assertEquals(Double.valueOf(1.01), result);
    assertTrue(map.isEmpty());
    assertThat(validationException.validationErrors().size(), is(0));
}
// A missing key returns null and leaves the map untouched.
public void testExtractOptionalDouble_InRange_ReturnsNullWhenKeyNotPresent() {
    var validationException = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", 1.01));
    var result = ServiceUtils.extractOptionalDoubleInRange(map, "other_key", 0.0, 2.0, "test_scope", validationException);
    assertNull(result);
    assertThat(map.size(), is(1));
    assertThat(map.get("key"), is(1.01));
}
// NOTE(review): the expected wording "must be a greater than or equal to" reads oddly; the
// test pins the current production message, so any grammar fix belongs in ServiceUtils first.
public void testExtractOptionalDouble_InRange_HasErrorWhenBelowMinValue() {
    var validationException = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", -2.0));
    var result = ServiceUtils.extractOptionalDoubleInRange(map, "key", 0.0, 2.0, "test_scope", validationException);
    assertNull(result);
    assertThat(validationException.validationErrors().size(), is(1));
    assertThat(
        validationException.validationErrors().get(0),
        is("[test_scope] Invalid value [-2.0]. [key] must be a greater than or equal to [0.0]")
    );
}
public void testExtractOptionalDouble_InRange_HasErrorWhenAboveMaxValue() {
    var validationException = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", 12.0));
    var result = ServiceUtils.extractOptionalDoubleInRange(map, "key", 0.0, 2.0, "test_scope", validationException);
    assertNull(result);
    assertThat(validationException.validationErrors().size(), is(1));
    assertThat(
        validationException.validationErrors().get(0),
        is("[test_scope] Invalid value [12.0]. [key] must be a less than or equal to [2.0]")
    );
}
// A null bound disables that side of the range check entirely.
public void testExtractOptionalDouble_InRange_DoesNotCheckMinWhenNull() {
    var validationException = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", -2.0));
    var result = ServiceUtils.extractOptionalDoubleInRange(map, "key", null, 2.0, "test_scope", validationException);
    assertEquals(Double.valueOf(-2.0), result);
    assertTrue(map.isEmpty());
    assertThat(validationException.validationErrors().size(), is(0));
}
public void testExtractOptionalDouble_InRange_DoesNotCheckMaxWhenNull() {
    var validationException = new ValidationException();
    Map<String, Object> map = modifiableMap(Map.of("key", 12.0));
    var result = ServiceUtils.extractOptionalDoubleInRange(map, "key", 0.0, null, "test_scope", validationException);
    assertEquals(Double.valueOf(12.0), result);
    assertTrue(map.isEmpty());
    assertThat(validationException.validationErrors().size(), is(0));
}
public void testExtractOptionalFloat_ExtractsAFloat() {
Map<String, Object> map = modifiableMap(Map.of("key", 1.0f));
var result = ServiceUtils.extractOptionalFloat(map, "key");
assertThat(result, is(1.0f));
assertTrue(map.isEmpty());
}
public void testExtractOptionalFloat_ReturnsNullWhenKeyNotPresent() {
Map<String, Object> map = modifiableMap(Map.of("key", 1.0f));
var result = ServiceUtils.extractOptionalFloat(map, "other_key");
assertNull(result);
assertThat(map.size(), is(1));
assertThat(map.get("key"), is(1.0f));
}
public void testExtractRequiredEnum_ExtractsAEnum() {
ValidationException validationException = new ValidationException();
Map<String, Object> map = modifiableMap(Map.of("key", "ingest"));
var result = ServiceUtils.extractRequiredEnum(
map,
"key",
"testscope",
InputType::fromString,
EnumSet.allOf(InputType.class),
validationException
);
assertThat(result, is(InputType.INGEST));
}
public void testExtractRequiredEnum_ReturnsNullWhenEnumValueIsNotPresent() {
ValidationException validationException = new ValidationException();
Map<String, Object> map = modifiableMap(Map.of("key", "invalid"));
var result = ServiceUtils.extractRequiredEnum(
map,
"key",
"testscope",
InputType::fromString,
EnumSet.allOf(InputType.class),
validationException
);
assertNull(result);
assertThat(validationException.validationErrors().size(), is(1));
assertThat(validationException.validationErrors().get(0), containsString("Invalid value [invalid] received. [key] must be one of"));
}
public void testExtractRequiredEnum_HasValidationErrorOnMissingSetting() {
ValidationException validationException = new ValidationException();
Map<String, Object> map = modifiableMap(Map.of("key", "ingest"));
var result = ServiceUtils.extractRequiredEnum(
map,
"missing_key",
"testscope",
InputType::fromString,
EnumSet.allOf(InputType.class),
validationException
);
assertNull(result);
assertThat(validationException.validationErrors().size(), is(1));
assertThat(validationException.validationErrors().get(0), is("[testscope] does not contain the required setting [missing_key]"));
}
public void testValidateInputType_NoValidationErrorsWhenInternalType() {
ValidationException validationException = new ValidationException();
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.INTERNAL_SEARCH, validationException);
assertThat(validationException.validationErrors().size(), is(0));
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.INTERNAL_INGEST, validationException);
assertThat(validationException.validationErrors().size(), is(0));
}
public void testValidateInputType_NoValidationErrorsWhenInputTypeIsNullOrUnspecified() {
ValidationException validationException = new ValidationException();
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.UNSPECIFIED, validationException);
assertThat(validationException.validationErrors().size(), is(0));
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(null, validationException);
assertThat(validationException.validationErrors().size(), is(0));
}
public void testValidateInputType_ValidationErrorsWhenInputTypeIsSpecified() {
ValidationException validationException = new ValidationException();
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.SEARCH, validationException);
assertThat(validationException.validationErrors().size(), is(1));
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.INGEST, validationException);
assertThat(validationException.validationErrors().size(), is(2));
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.CLASSIFICATION, validationException);
assertThat(validationException.validationErrors().size(), is(3));
ServiceUtils.validateInputTypeIsUnspecifiedOrInternal(InputType.CLUSTERING, validationException);
assertThat(validationException.validationErrors().size(), is(4));
}
public void testExtractRequiredMap() {
var validation = new ValidationException();
var extractedMap = extractRequiredMap(modifiableMap(Map.of("setting", Map.of("key", "value"))), "setting", "scope", validation);
assertTrue(validation.validationErrors().isEmpty());
assertThat(extractedMap, is(Map.of("key", "value")));
}
public void testExtractRequiredMap_ReturnsNull_WhenTypeIsInvalid() {
var validation = new ValidationException();
var extractedMap = extractRequiredMap(modifiableMap(Map.of("setting", 123)), "setting", "scope", validation);
assertNull(extractedMap);
assertThat(
validation.getMessage(),
is("Validation Failed: 1: field [setting] is not of the expected type. The value [123] cannot be converted to a [Map];")
);
}
public void testExtractRequiredMap_ReturnsNull_WhenMissingSetting() {
var validation = new ValidationException();
var extractedMap = extractRequiredMap(modifiableMap(Map.of("not_setting", Map.of("key", "value"))), "setting", "scope", validation);
assertNull(extractedMap);
assertThat(validation.getMessage(), is("Validation Failed: 1: [scope] does not contain the required setting [setting];"));
}
public void testExtractRequiredMap_ReturnsNull_WhenMapIsEmpty() {
var validation = new ValidationException();
var extractedMap = extractRequiredMap(modifiableMap(Map.of("setting", Map.of())), "setting", "scope", validation);
assertNull(extractedMap);
assertThat(
validation.getMessage(),
is("Validation Failed: 1: [scope] Invalid value empty map. [setting] must be a non-empty map;")
);
}
public void testExtractOptionalMap() {
var validation = new ValidationException();
var extractedMap = extractOptionalMap(modifiableMap(Map.of("setting", Map.of("key", "value"))), "setting", validation);
assertTrue(validation.validationErrors().isEmpty());
assertThat(extractedMap, is(Map.of("key", "value")));
}
public void testExtractOptionalMap_ReturnsNull_WhenTypeIsInvalid() {
var validation = new ValidationException();
var extractedMap = extractOptionalMap(modifiableMap(Map.of("setting", 123)), "setting", validation);
assertNull(extractedMap);
assertThat(
validation.getMessage(),
is("Validation Failed: 1: field [setting] is not of the expected type. The value [123] cannot be converted to a [Map];")
);
}
public void testExtractOptionalMap_ReturnsNull_WhenMissingSetting() {
var validation = new ValidationException();
var extractedMap = extractOptionalMap(modifiableMap(Map.of("not_setting", Map.of("key", "value"))), "setting", validation);
assertNull(extractedMap);
assertTrue(validation.validationErrors().isEmpty());
}
public void testExtractOptionalMap_ReturnsEmptyMap_WhenEmpty() {
var validation = new ValidationException();
var extractedMap = extractOptionalMap(modifiableMap(Map.of("setting", Map.of())), "setting", validation);
assertThat(extractedMap, is(Map.of()));
}
public void testExtractOptionalMapRemoveNulls() {
var validation = new ValidationException();
var map = modifiableMap(Map.of("key", "value"));
map.put("null_key", null);
var extractedMap = extractOptionalMapRemoveNulls(modifiableMap(Map.of("setting", map)), "setting", validation);
assertTrue(validation.validationErrors().isEmpty());
assertThat(extractedMap, is(Map.of("key", "value")));
}
public void testExtractOptionalMapRemoveNulls_HandlesNullMap_FromUnknownSetting() {
var validation = new ValidationException();
var extractedMap = extractOptionalMapRemoveNulls(
modifiableMap(Map.of("setting", Map.of("key", "value"))),
"key_that_does_not_exist",
validation
);
assertTrue(validation.validationErrors().isEmpty());
assertNull(extractedMap);
}
public void testValidateMapValues() {
var validation = new ValidationException();
validateMapValues(
Map.of("string_key", "abc", "num_key", Integer.valueOf(1)),
List.of(String.class, Integer.class),
"setting",
validation,
false
);
}
public void testValidateMapValues_IgnoresNullMap() {
var validation = new ValidationException();
validateMapValues(null, List.of(String.class, Integer.class), "setting", validation, false);
}
public void testValidateMapValues_ThrowsException_WhenMapContainsInvalidTypes() {
// Includes the invalid key and value in the exception message
{
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> validateMapValues(
Map.of("string_key", "abc", "num_key", Integer.valueOf(1)),
List.of(String.class),
"setting",
validation,
false
)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: Map field [setting] has an entry that is not valid, "
+ "[num_key => 1]. Value type of [1] is not one of [String].;"
)
);
}
// Does not include the invalid key and value in the exception message
{
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> validateMapValues(
Map.of("string_key", "abc", "num_key", Integer.valueOf(1)),
List.of(String.class, List.class),
"setting",
validation,
true
)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: Map field [setting] has an entry that is not valid. "
+ "Value type is not one of [List, String].;"
)
);
}
}
public void testValidateMapStringValues() {
var validation = new ValidationException();
assertThat(
validateMapStringValues(Map.of("string_key", "abc", "string_key2", new String("awesome")), "setting", validation, false),
is(Map.of("string_key", "abc", "string_key2", "awesome"))
);
}
public void testValidateMapStringValues_ReturnsEmptyMap_WhenMapIsNull() {
var validation = new ValidationException();
assertThat(validateMapStringValues(null, "setting", validation, false), is(Map.of()));
}
public void testValidateMapStringValues_ReturnsNullDefaultValue_WhenMapIsNull() {
var validation = new ValidationException();
assertNull(validateMapStringValues(null, "setting", validation, false, null));
}
public void testValidateMapStringValues_ThrowsException_WhenMapContainsInvalidTypes() {
// Includes the invalid key and value in the exception message
{
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> validateMapStringValues(Map.of("string_key", "abc", "num_key", Integer.valueOf(1)), "setting", validation, false)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: Map field [setting] has an entry that is not valid, "
+ "[num_key => 1]. Value type of [1] is not one of [String].;"
)
);
}
// Does not include the invalid key and value in the exception message
{
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> validateMapStringValues(Map.of("string_key", "abc", "num_key", Integer.valueOf(1)), "setting", validation, true)
);
assertThat(
exception.getMessage(),
is("Validation Failed: 1: Map field [setting] has an entry that is not valid. Value type is not one of [String].;")
);
}
}
public void testConvertMapStringsToSecureString() {
var validation = new ValidationException();
assertThat(
convertMapStringsToSecureString(Map.of("key", "value", "key2", "abc"), "setting", validation),
is(Map.of("key", new SecureString("value".toCharArray()), "key2", new SecureString("abc".toCharArray())))
);
}
public void testConvertMapStringsToSecureString_ReturnsAnEmptyMap_WhenMapIsNull() {
var validation = new ValidationException();
assertThat(convertMapStringsToSecureString(null, "setting", validation), is(Map.of()));
}
public void testConvertMapStringsToSecureString_ThrowsException_WhenMapContainsInvalidTypes() {
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> convertMapStringsToSecureString(Map.of("key", "value", "key2", 123), "setting", validation)
);
assertThat(
exception.getMessage(),
is("Validation Failed: 1: Map field [setting] has an entry that is not valid. Value type is not one of [String].;")
);
}
public void testRemoveNullValues() {
var map = new HashMap<String, Object>();
map.put("key1", null);
map.put("key2", "awesome");
map.put("key3", null);
assertThat(removeNullValues(map), is(Map.of("key2", "awesome")));
}
public void testRemoveNullValues_ReturnsNull_WhenMapIsNull() {
assertNull(removeNullValues(null));
}
public void testExtractOptionalListOfStringTuples() {
var validation = new ValidationException();
assertThat(
extractOptionalListOfStringTuples(
modifiableMap(Map.of("params", List.of(List.of("key", "value"), List.of("key2", "value2")))),
"params",
"scope",
validation
),
is(List.of(new Tuple<>("key", "value"), new Tuple<>("key2", "value2")))
);
}
public void testExtractOptionalListOfStringTuples_ReturnsNull_WhenFieldIsNotAList() {
var validation = new ValidationException();
assertNull(extractOptionalListOfStringTuples(modifiableMap(Map.of("params", Map.of())), "params", "scope", validation));
assertThat(
validation.getMessage(),
is("Validation Failed: 1: field [params] is not of the expected type. The value [{}] cannot be converted to a [List];")
);
}
public void testExtractOptionalListOfStringTuples_Exception_WhenTupleIsNotAList() {
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> extractOptionalListOfStringTuples(modifiableMap(Map.of("params", List.of("string"))), "params", "scope", validation)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: [scope] failed to parse tuple list entry [0] for setting "
+ "[params], expected a list but the entry is [String];"
)
);
}
public void testExtractOptionalListOfStringTuples_Exception_WhenTupleIsListSize2() {
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> extractOptionalListOfStringTuples(
modifiableMap(Map.of("params", List.of(List.of("string")))),
"params",
"scope",
validation
)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: [scope] failed to parse tuple list entry "
+ "[0] for setting [params], the tuple list size must be two, but was [1];"
)
);
}
public void testExtractOptionalListOfStringTuples_Exception_WhenTupleFirstElement_IsNotAString() {
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> extractOptionalListOfStringTuples(
modifiableMap(Map.of("params", List.of(List.of(1, "value")))),
"params",
"scope",
validation
)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: [scope] failed to parse tuple list entry [0] for setting [params], "
+ "the first element must be a string but was [Integer];"
)
);
}
public void testExtractOptionalListOfStringTuples_Exception_WhenTupleSecondElement_IsNotAString() {
var validation = new ValidationException();
var exception = expectThrows(
ValidationException.class,
() -> extractOptionalListOfStringTuples(
modifiableMap(Map.of("params", List.of(List.of("key", 2)))),
"params",
"scope",
validation
)
);
assertThat(
exception.getMessage(),
is(
"Validation Failed: 1: [scope] failed to parse tuple list entry [0] for setting [params], "
+ "the second element must be a string but was [Integer];"
)
);
}
public void testResolveInferenceTimeout_WithProvidedTimeout_ReturnsProvidedTimeout() {
var clusterService = mockClusterService(Settings.builder().put(InferencePlugin.INFERENCE_QUERY_TIMEOUT.getKey(), "10s").build());
var providedTimeout = TimeValue.timeValueSeconds(45);
for (InputType inputType : InputType.values()) {
var result = ServiceUtils.resolveInferenceTimeout(providedTimeout, inputType, clusterService);
assertEquals("Input type " + inputType + " should return provided timeout", providedTimeout, result);
}
}
public void testResolveInferenceTimeout_WithNullTimeout_ReturnsExpectedTimeoutByInputType() {
var configuredTimeout = TimeValue.timeValueSeconds(10);
var clusterService = mockClusterService(
Settings.builder().put(InferencePlugin.INFERENCE_QUERY_TIMEOUT.getKey(), configuredTimeout).build()
);
Map<InputType, TimeValue> expectedTimeouts = Map.of(
InputType.SEARCH,
configuredTimeout,
InputType.INTERNAL_SEARCH,
configuredTimeout,
InputType.INGEST,
InferenceAction.Request.DEFAULT_TIMEOUT,
InputType.INTERNAL_INGEST,
InferenceAction.Request.DEFAULT_TIMEOUT,
InputType.CLASSIFICATION,
InferenceAction.Request.DEFAULT_TIMEOUT,
InputType.CLUSTERING,
InferenceAction.Request.DEFAULT_TIMEOUT,
InputType.UNSPECIFIED,
InferenceAction.Request.DEFAULT_TIMEOUT
);
for (Map.Entry<InputType, TimeValue> entry : expectedTimeouts.entrySet()) {
InputType inputType = entry.getKey();
TimeValue expectedTimeout = entry.getValue();
var result = ServiceUtils.resolveInferenceTimeout(null, inputType, clusterService);
assertEquals("Input type " + inputType + " should return expected timeout", expectedTimeout, result);
}
}
}
|
ServiceUtilsTests
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/InterruptionTest.java
|
{
"start": 2843,
"end": 3286
}
|
class ____ extends AbstractFuture<Object> {
void f() {
cancel(wasInterrupted());
}
}
""")
.doTest();
}
@Test
public void negativeDelegate() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.common.util.concurrent.AbstractFuture;
import java.util.concurrent.Future;
|
Test
|
java
|
apache__camel
|
components/camel-ai/camel-djl/src/generated/java/org/apache/camel/component/djl/DJLEndpointUriFactory.java
|
{
"start": 513,
"end": 2249
}
|
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":application";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(6);
props.add("application");
props.add("artifactId");
props.add("lazyStartProducer");
props.add("model");
props.add("showProgress");
props.add("translator");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
SECRET_PROPERTY_NAMES = Collections.emptySet();
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "djl".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "application", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
|
DJLEndpointUriFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/RightJoinNullnessPredicateQueryTest.java
|
{
"start": 4344,
"end": 4676
}
|
class ____ {
@Id
private Long id;
@OneToOne
private RelatedEntity related;
public MainEntity() {
}
public MainEntity(Long id, RelatedEntity related) {
this.id = id;
this.related = related;
}
public Long getId() {
return id;
}
public RelatedEntity getRelated() {
return related;
}
}
}
|
MainEntity
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/util/LongLongHashTests.java
|
{
"start": 876,
"end": 3967
}
|
class ____ extends ESTestCase {
private BigArrays randombigArrays() {
return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
}
private LongLongHash randomHash() {
// Test high load factors to make sure that collision resolution works fine
final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
return new LongLongHash(randomIntBetween(0, 100), maxLoadFactor, randombigArrays());
}
public void testSimple() {
try (LongLongHash hash = randomHash()) {
assertThat(hash.add(0, 0), equalTo(0L));
assertThat(hash.add(0, 1), equalTo(1L));
assertThat(hash.add(0, 2), equalTo(2L));
assertThat(hash.add(1, 0), equalTo(3L));
assertThat(hash.add(1, 1), equalTo(4L));
assertThat(hash.add(0, 0), equalTo(-1L));
assertThat(hash.add(0, 2), equalTo(-3L));
assertThat(hash.add(1, 1), equalTo(-5L));
assertThat(hash.getKey1(0), equalTo(0L));
assertThat(hash.getKey2(0), equalTo(0L));
assertThat(hash.getKey1(4), equalTo(1L));
assertThat(hash.getKey2(4), equalTo(1L));
}
}
public void testDuel() {
try (LongLongHash hash = randomHash()) {
int iters = scaledRandomIntBetween(100, 100000);
Key[] values = randomArray(1, iters, Key[]::new, () -> new Key(randomLong(), randomLong()));
Map<Key, Integer> keyToId = new HashMap<>();
List<Key> idToKey = new ArrayList<>();
for (int i = 0; i < iters; ++i) {
Key key = randomFrom(values);
if (keyToId.containsKey(key)) {
assertEquals(-1 - keyToId.get(key), hash.add(key.key1, key.key2));
} else {
assertEquals(keyToId.size(), hash.add(key.key1, key.key2));
keyToId.put(key, keyToId.size());
idToKey.add(key);
}
}
assertEquals(keyToId.size(), hash.size());
for (Map.Entry<Key, Integer> entry : keyToId.entrySet()) {
assertEquals(entry.getValue().longValue(), hash.find(entry.getKey().key1, entry.getKey().key2));
}
assertEquals(idToKey.size(), hash.size());
for (long i = 0; i < hash.capacity(); i++) {
long id = hash.id(i);
if (id >= 0) {
Key key = idToKey.get((int) id);
assertEquals(key.key1, hash.getKey1(id));
assertEquals(key.key2, hash.getKey2(id));
}
}
for (long i = 0; i < hash.size(); i++) {
Key key = idToKey.get((int) i);
assertEquals(key.key1, hash.getKey1(i));
assertEquals(key.key2, hash.getKey2(i));
}
}
}
public void testAllocation() {
MockBigArrays.assertFitsIn(ByteSizeValue.ofBytes(256), bigArrays -> new LongLongHash(1, bigArrays));
}
|
LongLongHashTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/embeddings/OpenShiftAiEmbeddingsResponseHandler.java
|
{
"start": 787,
"end": 1322
}
|
class ____ extends OpenAiResponseHandler {
/**
* Constructs a new OpenShiftAiEmbeddingsResponseHandler with the specified request type and response parser.
*
* @param requestType the type of request this handler will process
* @param parseFunction the function to parse the response
*/
public OpenShiftAiEmbeddingsResponseHandler(String requestType, ResponseParser parseFunction) {
super(requestType, parseFunction, ErrorResponse::fromResponse, false);
}
}
|
OpenShiftAiEmbeddingsResponseHandler
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/extension/memoized/MemoizedTest.java
|
{
"start": 16857,
"end": 16907
}
|
interface ____<InputT, ResultT> {}
|
TypeEdgeIterable
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/FilterChainProxy.java
|
{
"start": 15100,
"end": 16115
}
|
class ____ implements FilterChain {
private final FilterChain originalChain;
private final List<Filter> additionalFilters;
private final int size;
private int currentPosition = 0;
private VirtualFilterChain(FilterChain chain, List<Filter> additionalFilters) {
this.originalChain = chain;
this.additionalFilters = additionalFilters;
this.size = additionalFilters.size();
}
@Override
public void doFilter(ServletRequest request, ServletResponse response) throws IOException, ServletException {
if (this.currentPosition == this.size) {
this.originalChain.doFilter(request, response);
return;
}
this.currentPosition++;
Filter nextFilter = this.additionalFilters.get(this.currentPosition - 1);
if (logger.isTraceEnabled()) {
String name = nextFilter.getClass().getSimpleName();
logger.trace(LogMessage.format("Invoking %s (%d/%d)", name, this.currentPosition, this.size));
}
nextFilter.doFilter(request, response, this);
}
}
public
|
VirtualFilterChain
|
java
|
google__guava
|
android/guava/src/com/google/common/base/CommonPattern.java
|
{
"start": 935,
"end": 1400
}
|
class ____ {
public abstract CommonMatcher matcher(CharSequence t);
public abstract String pattern();
public abstract int flags();
// Re-declare this as abstract to force subclasses to override.
@Override
public abstract String toString();
public static CommonPattern compile(String pattern) {
return Platform.compilePattern(pattern);
}
public static boolean isPcreLike() {
return Platform.patternCompilerIsPcreLike();
}
}
|
CommonPattern
|
java
|
mapstruct__mapstruct
|
processor/src/test/resources/fixtures/org/mapstruct/ap/test/bugs/_913/DomainDtoWithPresenceCheckMapperImpl.java
|
{
"start": 534,
"end": 7651
}
|
class ____ implements DomainDtoWithPresenceCheckMapper {
private final Helper helper = new Helper();
@Override
public Domain create(DtoWithPresenceCheck source) {
if ( source == null ) {
return null;
}
Domain domain = createNullDomain();
if ( source.hasStrings() ) {
List<String> list = source.getStrings();
domain.setStrings( new LinkedHashSet<String>( list ) );
}
if ( source.hasStrings() ) {
domain.setLongs( stringListToLongSet( source.getStrings() ) );
}
if ( source.hasStringsInitialized() ) {
List<String> list1 = source.getStringsInitialized();
domain.setStringsInitialized( new LinkedHashSet<String>( list1 ) );
}
if ( source.hasStringsInitialized() ) {
domain.setLongsInitialized( stringListToLongSet( source.getStringsInitialized() ) );
}
if ( source.hasStringsWithDefault() ) {
List<String> list2 = source.getStringsWithDefault();
domain.setStringsWithDefault( new ArrayList<String>( list2 ) );
}
else {
domain.setStringsWithDefault( helper.toList( "3" ) );
}
return domain;
}
@Override
public void update(DtoWithPresenceCheck source, Domain target) {
if ( source == null ) {
return;
}
if ( target.getStrings() != null ) {
if ( source.hasStrings() ) {
target.getStrings().clear();
target.getStrings().addAll( source.getStrings() );
}
}
else {
if ( source.hasStrings() ) {
List<String> list = source.getStrings();
target.setStrings( new LinkedHashSet<String>( list ) );
}
}
if ( target.getLongs() != null ) {
if ( source.hasStrings() ) {
target.getLongs().clear();
target.getLongs().addAll( stringListToLongSet( source.getStrings() ) );
}
}
else {
if ( source.hasStrings() ) {
target.setLongs( stringListToLongSet( source.getStrings() ) );
}
}
if ( target.getStringsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getStringsInitialized().clear();
target.getStringsInitialized().addAll( source.getStringsInitialized() );
}
}
else {
if ( source.hasStringsInitialized() ) {
List<String> list1 = source.getStringsInitialized();
target.setStringsInitialized( new LinkedHashSet<String>( list1 ) );
}
}
if ( target.getLongsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getLongsInitialized().clear();
target.getLongsInitialized().addAll( stringListToLongSet( source.getStringsInitialized() ) );
}
}
else {
if ( source.hasStringsInitialized() ) {
target.setLongsInitialized( stringListToLongSet( source.getStringsInitialized() ) );
}
}
if ( target.getStringsWithDefault() != null ) {
if ( source.hasStringsWithDefault() ) {
target.getStringsWithDefault().clear();
target.getStringsWithDefault().addAll( source.getStringsWithDefault() );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
else {
if ( source.hasStringsWithDefault() ) {
List<String> list2 = source.getStringsWithDefault();
target.setStringsWithDefault( new ArrayList<String>( list2 ) );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
}
@Override
public Domain updateWithReturn(DtoWithPresenceCheck source, Domain target) {
if ( source == null ) {
return target;
}
if ( target.getStrings() != null ) {
if ( source.hasStrings() ) {
target.getStrings().clear();
target.getStrings().addAll( source.getStrings() );
}
}
else {
if ( source.hasStrings() ) {
List<String> list = source.getStrings();
target.setStrings( new LinkedHashSet<String>( list ) );
}
}
if ( target.getLongs() != null ) {
if ( source.hasStrings() ) {
target.getLongs().clear();
target.getLongs().addAll( stringListToLongSet( source.getStrings() ) );
}
}
else {
if ( source.hasStrings() ) {
target.setLongs( stringListToLongSet( source.getStrings() ) );
}
}
if ( target.getStringsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getStringsInitialized().clear();
target.getStringsInitialized().addAll( source.getStringsInitialized() );
}
}
else {
if ( source.hasStringsInitialized() ) {
List<String> list1 = source.getStringsInitialized();
target.setStringsInitialized( new LinkedHashSet<String>( list1 ) );
}
}
if ( target.getLongsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getLongsInitialized().clear();
target.getLongsInitialized().addAll( stringListToLongSet( source.getStringsInitialized() ) );
}
}
else {
if ( source.hasStringsInitialized() ) {
target.setLongsInitialized( stringListToLongSet( source.getStringsInitialized() ) );
}
}
if ( target.getStringsWithDefault() != null ) {
if ( source.hasStringsWithDefault() ) {
target.getStringsWithDefault().clear();
target.getStringsWithDefault().addAll( source.getStringsWithDefault() );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
else {
if ( source.hasStringsWithDefault() ) {
List<String> list2 = source.getStringsWithDefault();
target.setStringsWithDefault( new ArrayList<String>( list2 ) );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
return target;
}
protected Set<Long> stringListToLongSet(List<String> list) {
if ( list == null ) {
return null;
}
Set<Long> set = new LinkedHashSet<Long>( Math.max( (int) ( list.size() / .75f ) + 1, 16 ) );
for ( String string : list ) {
set.add( Long.parseLong( string ) );
}
return set;
}
}
|
DomainDtoWithPresenceCheckMapperImpl
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/config/configcenter/AbstractDynamicConfigurationFactory.java
|
{
"start": 1241,
"end": 1816
}
|
class ____ implements DynamicConfigurationFactory {
private volatile ConcurrentHashMap<String, DynamicConfiguration> dynamicConfigurations = new ConcurrentHashMap<>();
@Override
public final DynamicConfiguration getDynamicConfiguration(URL url) {
String key = url == null ? DEFAULT_KEY : url.toServiceString();
return ConcurrentHashMapUtils.computeIfAbsent(dynamicConfigurations, key, k -> createDynamicConfiguration(url));
}
protected abstract DynamicConfiguration createDynamicConfiguration(URL url);
}
|
AbstractDynamicConfigurationFactory
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy-common/runtime/src/main/java/io/quarkus/resteasy/common/runtime/ResteasyInjectorFactoryRecorder.java
|
{
"start": 199,
"end": 364
}
|
class ____ {
public RuntimeValue<InjectorFactory> setup() {
return new RuntimeValue<>(new QuarkusInjectorFactory());
}
}
|
ResteasyInjectorFactoryRecorder
|
java
|
google__guice
|
core/src/com/google/inject/internal/InternalFlags.java
|
{
"start": 3427,
"end": 4468
}
|
enum ____ {
/** Ignore null parameters to @Provides methods. */
IGNORE,
/** Warn if null parameters are passed to non-@Nullable parameters of provides methods. */
WARN,
/** Error if null parameters are passed to non-@Nullable parameters of provides parameters */
ERROR
}
/**
* Options for controlling whether Guice uses bytecode generation at runtime. When bytecode
* generation is enabled, the following features will be enabled in Guice:
*
* <ul>
* <li>Runtime bytecode generation (instead of reflection) will be used when Guice need to
* invoke application code.
* <li>Method interception.
* </ul>
*
* <p>Bytecode generation is generally faster than using reflection when invoking application
* code, however, it can use more memory and slower in certain cases due to the time spent in
* generating the classes. If you prefer to use reflection over bytecode generation then set
* {@link BytecodeGenOption} to {@code DISABLED}.
*/
public
|
NullableProvidesOption
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_2174/UserMapper.java
|
{
"start": 2580,
"end": 2735
}
|
class ____ extends Exception {
public CityNotFoundException(String message) {
super( message );
}
}
|
CityNotFoundException
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForDouble.java
|
{
"start": 2255,
"end": 2699
}
|
class ____ extends KeyExtractorForDouble {
private final DoubleVector vector;
FromVector(TopNEncoder encoder, byte nul, byte nonNul, DoubleVector vector) {
super(encoder, nul, nonNul);
this.vector = vector;
}
@Override
public int writeKey(BreakingBytesRefBuilder key, int position) {
return nonNul(key, vector.getDouble(position));
}
}
static
|
FromVector
|
java
|
apache__maven
|
api/maven-api-core/src/main/java/org/apache/maven/api/JavaPathType.java
|
{
"start": 1823,
"end": 2475
}
|
class ____
* and on the Java module path.</p>
*
* <h2>Relationship with Java compiler standard location</h2>
* This enumeration is closely related to the {@link JavaFileManager.Location} enumerations.
* A difference is that the latter enumerates input and output files, while {@code JavaPathType}
* enumerates only input dependencies. Another difference is that {@code JavaPathType} contains
* some enumeration values used only at runtime and therefore not available in {@code javax.tool},
* such as agent paths.
*
* @see org.apache.maven.api.services.DependencyResolverResult#getDispatchedPaths()
*
* @since 4.0.0
*/
@Experimental
public
|
path
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/mappedsuperclass/intermediate/SavingsAccountBase.java
|
{
"start": 417,
"end": 942
}
|
class ____ extends Account {
@Column(name = "SAVACC_WITHDRAWALLIMIT",
precision = 8, scale = 2)
private BigDecimal withdrawalLimit;
protected SavingsAccountBase() {
}
protected SavingsAccountBase(String accountNumber, BigDecimal withdrawalLimit) {
super( accountNumber );
this.withdrawalLimit = withdrawalLimit;
}
public BigDecimal getWithdrawalLimit() {
return withdrawalLimit;
}
public void setWithdrawalLimit(BigDecimal withdrawalLimit) {
this.withdrawalLimit = withdrawalLimit;
}
}
|
SavingsAccountBase
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/common/metrics/KafkaMetricsContextTest.java
|
{
"start": 1156,
"end": 3318
}
|
class ____ {
private static final String SAMPLE_NAMESPACE = "sample-ns";
private static final String LABEL_A_KEY = "label-a";
private static final String LABEL_A_VALUE = "label-a-value";
private String namespace;
private Map<String, String> labels;
private KafkaMetricsContext context;
@BeforeEach
public void beforeEach() {
namespace = SAMPLE_NAMESPACE;
labels = new HashMap<>();
labels.put(LABEL_A_KEY, LABEL_A_VALUE);
}
@Test
public void testCreationWithValidNamespaceAndNoLabels() {
labels.clear();
context = new KafkaMetricsContext(namespace, labels);
assertEquals(1, context.contextLabels().size());
assertEquals(namespace, context.contextLabels().get(MetricsContext.NAMESPACE));
}
@Test
public void testCreationWithValidNamespaceAndLabels() {
context = new KafkaMetricsContext(namespace, labels);
assertEquals(2, context.contextLabels().size());
assertEquals(namespace, context.contextLabels().get(MetricsContext.NAMESPACE));
assertEquals(LABEL_A_VALUE, context.contextLabels().get(LABEL_A_KEY));
}
@Test
public void testCreationWithValidNamespaceAndNullLabelValues() {
labels.put(LABEL_A_KEY, null);
context = new KafkaMetricsContext(namespace, labels);
assertEquals(2, context.contextLabels().size());
assertEquals(namespace, context.contextLabels().get(MetricsContext.NAMESPACE));
assertNull(context.contextLabels().get(LABEL_A_KEY));
}
@Test
public void testCreationWithNullNamespaceAndLabels() {
context = new KafkaMetricsContext(null, labels);
assertEquals(2, context.contextLabels().size());
assertNull(context.contextLabels().get(MetricsContext.NAMESPACE));
assertEquals(LABEL_A_VALUE, context.contextLabels().get(LABEL_A_KEY));
}
@Test
public void testKafkaMetricsContextLabelsAreImmutable() {
context = new KafkaMetricsContext(namespace, labels);
assertThrows(UnsupportedOperationException.class, () -> context.contextLabels().clear());
}
}
|
KafkaMetricsContextTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlRestValidationTestCase.java
|
{
"start": 874,
"end": 5982
}
|
class ____ extends RemoteClusterAwareEqlRestTestCase {
private static final String indexName = "test_eql";
protected static final String[] existentIndexWithWildcard = new String[] {
indexName + ",inexistent*",
indexName + "*,inexistent*",
"inexistent*," + indexName };
private static final String[] existentIndexWithoutWildcard = new String[] { indexName + ",inexistent", "inexistent," + indexName };
protected static final String[] inexistentIndexNameWithWildcard = new String[] { "inexistent*", "inexistent1*,inexistent2*" };
protected static final String[] inexistentIndexNameWithoutWildcard = new String[] { "inexistent", "inexistent1,inexistent2" };
@Before
public void prepareIndices() throws IOException {
if (provisioningClient().performRequest(new Request("HEAD", "/" + indexName)).getStatusLine().getStatusCode() == 404) {
createIndex(indexName, (String) null);
}
Object[] fieldsAndValues = new Object[] { "event_type", "my_event", "@timestamp", "2020-10-08T12:35:48Z", "val", 0 };
XContentBuilder document = jsonBuilder().startObject();
for (int i = 0; i < fieldsAndValues.length; i += 2) {
document.field((String) fieldsAndValues[i], fieldsAndValues[i + 1]);
}
document.endObject();
final Request request = new Request("POST", "/" + indexName + "/_doc/" + 0);
request.setJsonEntity(Strings.toString(document));
assertOK(provisioningClient().performRequest(request));
assertOK(provisioningAdminClient().performRequest(new Request("POST", "/" + indexName + "/_refresh")));
}
protected abstract String getInexistentIndexErrorMessage();
protected String getInexistentWildcardErrorMessage() {
return getInexistentIndexErrorMessage();
}
protected abstract void assertErrorMessageWhenAllowNoIndicesIsFalse(String reqParameter) throws IOException;
public void testDefaultIndicesOptions() throws IOException {
assertErrorMessages(inexistentIndexNameWithWildcard, EMPTY, getInexistentWildcardErrorMessage());
assertErrorMessages(inexistentIndexNameWithoutWildcard, EMPTY, getInexistentIndexErrorMessage());
assertValidRequestOnIndices(existentIndexWithWildcard, EMPTY);
assertValidRequestOnIndices(existentIndexWithoutWildcard, EMPTY);
}
public void testAllowNoIndicesOption() throws IOException {
boolean allowNoIndices = randomBoolean();
boolean setAllowNoIndices = randomBoolean();
boolean isAllowNoIndices = allowNoIndices || setAllowNoIndices == false;
String reqParameter = setAllowNoIndices ? "?allow_no_indices=" + allowNoIndices : EMPTY;
if (isAllowNoIndices) {
assertErrorMessages(inexistentIndexNameWithWildcard, reqParameter, getInexistentWildcardErrorMessage());
assertErrorMessages(inexistentIndexNameWithoutWildcard, reqParameter, getInexistentIndexErrorMessage());
assertValidRequestOnIndices(existentIndexWithWildcard, reqParameter);
assertValidRequestOnIndices(existentIndexWithoutWildcard, reqParameter);
} else {
assertValidRequestOnIndices(existentIndexWithoutWildcard, reqParameter);
assertErrorMessageWhenAllowNoIndicesIsFalse(reqParameter);
}
}
protected void assertErrorMessages(String[] indices, String reqParameter, String errorMessage) throws IOException {
for (String indexName : indices) {
assertErrorMessage(indexName, reqParameter, errorMessage + "[" + indexPattern(indexName) + "]");
}
}
protected void assertErrorMessage(String indexName, String reqParameter, String errorMessage) throws IOException {
final Request request = createRequest(indexName, reqParameter);
ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(request));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), equalTo(404));
assertThat(exc.getMessage(), containsString(errorMessage));
}
private Request createRequest(String indexName, String reqParameter) throws IOException {
final Request request = new Request("POST", "/" + indexPattern(indexName) + "/_eql/search" + reqParameter);
request.setJsonEntity(
Strings.toString(
JsonXContent.contentBuilder()
.startObject()
.field("event_category_field", "event_type")
.field("query", "my_event where true")
.endObject()
)
);
return request;
}
private void assertValidRequestOnIndices(String[] indices, String reqParameter) throws IOException {
for (String indexName : indices) {
final Request request = createRequest(indexName, reqParameter);
Response response = client().performRequest(request);
assertOK(response);
}
}
protected String indexPattern(String index) {
return index;
}
}
|
EqlRestValidationTestCase
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/CriteriaSelectOneToOneUnownedTest.java
|
{
"start": 3267,
"end": 3523
}
|
class ____ {
@Id
private Long id;
private String name;
@OneToOne(optional = false, fetch = FetchType.LAZY)
private Parent parent;
public Child() {
}
public Child(Long id, String name) {
this.id = id;
this.name = name;
}
}
}
|
Child
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_611/SomeClass.java
|
{
"start": 733,
"end": 1001
}
|
class ____ {
private final String value;
public Target(String value) {
this.value = value;
}
public String getValue() {
return value;
}
}
}
public static
|
Target
|
java
|
processing__processing4
|
app/src/processing/app/ui/ColorChooser.java
|
{
"start": 17017,
"end": 18131
}
|
class ____ extends PlainDocument {
NumberField parentField;
public NumberDocument(NumberField parentField) {
this.parentField = parentField;
//System.out.println("setting parent to " + parentSelector);
}
public void insertString(int offs, String str, AttributeSet a)
throws BadLocationException {
if (str == null) return;
char[] chars = str.toCharArray();
int charCount = 0;
// remove any non-digit chars
for (int i = 0; i < chars.length; i++) {
boolean ok = Character.isDigit(chars[i]);
if (parentField.allowHex) {
if ((chars[i] >= 'A') && (chars[i] <= 'F')) ok = true;
if ((chars[i] >= 'a') && (chars[i] <= 'f')) ok = true;
if ((offs == 0) && (i == 0) && (chars[i] == '#')) ok = true;
}
if (ok) {
if (charCount != i) { // shift if necessary
chars[charCount] = chars[i];
}
charCount++;
}
}
super.insertString(offs, new String(chars, 0, charCount), a);
// can't call any sort of methods on the enclosing
|
NumberDocument
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/RangeQuery.java
|
{
"start": 645,
"end": 3465
}
|
class ____ extends Query {
private final String field;
private final Object lower, upper;
private final boolean includeLower, includeUpper;
private final String format;
private final ZoneId zoneId;
public RangeQuery(Source source, String field, Object lower, boolean includeLower, Object upper, boolean includeUpper, ZoneId zoneId) {
this(source, field, lower, includeLower, upper, includeUpper, null, zoneId);
}
public RangeQuery(
Source source,
String field,
Object lower,
boolean includeLower,
Object upper,
boolean includeUpper,
String format,
ZoneId zoneId
) {
super(source);
this.field = field;
this.lower = lower;
this.upper = upper;
this.includeLower = includeLower;
this.includeUpper = includeUpper;
this.format = format;
this.zoneId = zoneId;
}
public String field() {
return field;
}
public Object lower() {
return lower;
}
public Object upper() {
return upper;
}
public boolean includeLower() {
return includeLower;
}
public boolean includeUpper() {
return includeUpper;
}
public String format() {
return format;
}
public ZoneId zoneId() {
return zoneId;
}
@Override
protected QueryBuilder asBuilder() {
RangeQueryBuilder queryBuilder = rangeQuery(field).from(lower, includeLower).to(upper, includeUpper);
if (Strings.hasText(format)) {
queryBuilder.format(format);
}
if (zoneId != null) {
queryBuilder.timeZone(zoneId.getId());
}
return queryBuilder;
}
@Override
public int hashCode() {
return Objects.hash(field, lower, upper, includeLower, includeUpper, format, zoneId);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
RangeQuery other = (RangeQuery) obj;
return Objects.equals(field, other.field)
&& Objects.equals(includeLower, other.includeLower)
&& Objects.equals(includeUpper, other.includeUpper)
&& Objects.equals(lower, other.lower)
&& Objects.equals(upper, other.upper)
&& Objects.equals(format, other.format)
&& Objects.equals(zoneId, other.zoneId);
}
@Override
protected String innerToString() {
return field + ":" + (includeLower ? "[" : "(") + lower + ", " + upper + (includeUpper ? "]" : ")") + "@" + zoneId.getId();
}
@Override
public boolean containsPlan() {
return false;
}
}
|
RangeQuery
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/OracleDataDrivenDBInputFormat.java
|
{
"start": 1379,
"end": 2513
}
|
class ____<T extends DBWritable>
extends DataDrivenDBInputFormat<T> implements Configurable {
/**
* @return the DBSplitter implementation to use to divide the table/query into InputSplits.
*/
@Override
protected DBSplitter getSplitter(int sqlDataType) {
switch (sqlDataType) {
case Types.DATE:
case Types.TIME:
case Types.TIMESTAMP:
return new OracleDateSplitter();
default:
return super.getSplitter(sqlDataType);
}
}
@Override
protected RecordReader<LongWritable, T> createDBRecordReader(DBInputSplit split,
Configuration conf) throws IOException {
DBConfiguration dbConf = getDBConf();
@SuppressWarnings("unchecked")
Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
try {
// Use Oracle-specific db reader
return new OracleDataDrivenDBRecordReader<T>(split, inputClass,
conf, createConnection(), dbConf, dbConf.getInputConditions(),
dbConf.getInputFieldNames(), dbConf.getInputTableName());
} catch (SQLException ex) {
throw new IOException(ex.getMessage());
}
}
}
|
OracleDataDrivenDBInputFormat
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockito/internal/junit/JUnitRuleTest.java
|
{
"start": 577,
"end": 1846
}
|
class ____ {
@Rule public SafeJUnitRule rule = new SafeJUnitRule(MockitoJUnit.rule());
@Mock IMethods mock;
@Test
public void injects_into_test_case() throws Throwable {
assertTrue(mockingDetails(mock).isMock());
}
@Test
public void rethrows_exception() throws Throwable {
rule.expectFailure(RuntimeException.class, "foo");
throw new RuntimeException("foo");
}
@SuppressWarnings({"CheckReturnValue", "MockitoUsage"})
@Test
public void detects_invalid_mockito_usage_on_success() throws Throwable {
rule.expectFailure(UnfinishedStubbingException.class);
when(mock.simpleMethod());
}
@SuppressWarnings({"CheckReturnValue", "MockitoUsage"})
@Test
public void does_not_check_invalid_mockito_usage_on_failure() throws Throwable {
// This intended behavior is questionable
// However, it was like that since the beginning of JUnit rule support
// Users never questioned this behavior. Hence, let's stick to it unless we have more data
rule.expectFailure(RuntimeException.class, "foo");
Mockito.when(mock.simpleMethod()); // <--- unfinished stubbing
throw new RuntimeException("foo"); // <--- some failure
}
}
|
JUnitRuleTest
|
java
|
spring-projects__spring-framework
|
spring-orm/src/main/java/org/springframework/orm/jpa/JpaVendorAdapter.java
|
{
"start": 966,
"end": 1287
}
|
interface ____ allows to plug in vendor-specific behavior
* into Spring's EntityManagerFactory creators. Serves as single
* configuration point for all vendor-specific properties.
*
* @author Juergen Hoeller
* @author Rod Johnson
* @since 2.0
* @see AbstractEntityManagerFactoryBean#setJpaVendorAdapter
*/
public
|
that
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/service/ClientIpWhiteList.java
|
{
"start": 1118,
"end": 2921
}
|
class ____ {
public static final String CLIENT_IP_WHITELIST_METADATA = "com.alibaba.nacos.metadata.clientIpWhitelist";
private static final AtomicReference<List<String>> CLIENT_IP_WHITELIST = new AtomicReference<>(
new ArrayList<>());
private static Boolean isOpen = false;
/**
* Judge whether specified client ip includes in the whitelist.
*
* @param clientIp clientIp string value.
* @return Judge result.
*/
public static boolean isLegalClient(String clientIp) {
if (StringUtils.isBlank(clientIp)) {
throw new IllegalArgumentException("clientIp is empty");
}
clientIp = clientIp.trim();
if (CLIENT_IP_WHITELIST.get().contains(clientIp)) {
return true;
}
return false;
}
/**
* Whether start client ip whitelist.
*
* @return true: enable ; false disable
*/
public static boolean isEnableWhitelist() {
return isOpen;
}
/**
* Load white lists based content parameter value.
*
* @param content content string value.
*/
public static void load(String content) {
if (StringUtils.isBlank(content)) {
DEFAULT_LOG.warn("clientIpWhiteList is blank.close whitelist.");
isOpen = false;
CLIENT_IP_WHITELIST.get().clear();
return;
}
DEFAULT_LOG.warn("[clientIpWhiteList] {}", content);
try {
AclInfo acl = JacksonUtils.toObj(content, AclInfo.class);
isOpen = acl.getIsOpen();
CLIENT_IP_WHITELIST.set(acl.getIps());
} catch (Exception ioe) {
DEFAULT_LOG.error("failed to load clientIpWhiteList, " + ioe.toString(), ioe);
}
}
}
|
ClientIpWhiteList
|
java
|
alibaba__fastjson
|
src/test/java/com/derbysoft/spitfire/fastjson/dto/PaymentType.java
|
{
"start": 53,
"end": 94
}
|
enum ____ {
POA,
PREPAY
}
|
PaymentType
|
java
|
apache__avro
|
lang/java/mapred/src/main/java/org/apache/avro/mapred/AvroMapper.java
|
{
"start": 1142,
"end": 1298
}
|
class ____ pass their subclass to
* {@link AvroJob#setMapperClass(JobConf, Class)}, overriding
* {@link #map(Object, AvroCollector, Reporter)}.
*/
public
|
and
|
java
|
netty__netty
|
codec-dns/src/test/java/io/netty/handler/codec/dns/DnsRecordTypeTest.java
|
{
"start": 2097,
"end": 2445
}
|
class ____.
*/
@Test
public void testEquals() throws Exception {
for (DnsRecordType t1 : allTypes()) {
for (DnsRecordType t2 : allTypes()) {
if (t1 != t2) {
assertNotEquals(t1, t2);
}
}
}
}
/**
* Test of find method, of
|
DnsRecordType
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/Http2.java
|
{
"start": 911,
"end": 1318
}
|
class ____ {
/**
* Whether to enable HTTP/2 support, if the current environment supports it.
*/
private boolean enabled;
/**
* Return whether to enable HTTP/2 support, if the current environment supports it.
* @return {@code true} to enable HTTP/2 support
*/
public boolean isEnabled() {
return this.enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
}
|
Http2
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/internal/util/collections/BoundedConcurrentHashMap.java
|
{
"start": 57815,
"end": 58073
}
|
class ____ extends HashIterator implements Iterator<V>, Enumeration<V> {
@Override
public V next() {
return super.nextEntry().value;
}
@Override
public V nextElement() {
return super.nextEntry().value;
}
}
/**
* Custom Entry
|
ValueIterator
|
java
|
quarkusio__quarkus
|
extensions/kubernetes-config/runtime/src/main/java/io/quarkus/kubernetes/config/runtime/OrdinalData.java
|
{
"start": 47,
"end": 647
}
|
enum ____ {
CONFIG_MAP(
270, // this is higher than the file system or jar ordinals, but lower than env vars
284 // this is one less than the ordinal of Secret
),
SECRET(
285, // this is one less than the ordinal of ConfigMap
299 // this is one less than env vars
);
private final int base;
private final int max;
OrdinalData(int base, int max) {
this.base = base;
this.max = max;
}
public int getBase() {
return base;
}
public int getMax() {
return max;
}
}
|
OrdinalData
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.