Dataset record metadata — columns: language (stringclasses 1) | repo (stringclasses 60) | path (stringlengths 22–294) | class_span (dict) | source (stringlengths 13–1.16M) | target (stringlengths 1–113)

| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java | {"start": 3250, "end": 54610} | (source follows) |
class ____ extends DateHistogramAggregatorTestCase {
/**
 * A date that is always "searchable" because it is indexed.
 */
private static final String SEARCHABLE_DATE = "searchable_date";
// Ten timestamps spread across 2010-2017, shared by most of the bucketing tests below.
// Note the gaps (no 2011 or 2014 docs) — they produce the empty buckets that the
// minDocCount tests rely on.
private static final List<String> DATASET = Arrays.asList(
    "2010-03-12T01:07:45",
    "2010-04-27T03:43:34",
    "2012-05-18T04:11:00",
    "2013-05-29T05:11:31",
    "2013-10-31T08:24:05",
    "2015-02-13T13:09:32",
    "2015-06-24T13:47:43",
    "2015-11-13T16:14:34",
    "2016-03-04T17:09:50",
    "2017-12-12T22:55:46"
);
/**
 * A {@code date_histogram} over a boolean-typed field must be rejected with a clear error.
 */
public void testBooleanFieldUnsupported() throws IOException {
    final String fieldName = "bogusBoolean";
    AggTestConfig config = new AggTestConfig(
        new DateHistogramAggregationBuilder("name").calendarInterval(DateHistogramInterval.HOUR).field(fieldName),
        new BooleanFieldMapper.BooleanFieldType(fieldName)
    );
    IllegalArgumentException failure = expectThrows(IllegalArgumentException.class, () -> testCase(iw -> {
        Document doc = new Document();
        doc.add(new SortedNumericDocValuesField(fieldName, 0));
        iw.addDocument(doc);
    }, agg -> {}, config));
    assertThat(failure.getMessage(), equalTo("Field [bogusBoolean] of type [boolean] is not supported for aggregation [date_histogram]"));
}
/**
 * Neither a calendar nor a fixed interval should produce buckets when the query matches nothing.
 */
public void testMatchNoDocs() throws IOException {
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.YEAR).field(AGGREGABLE_DATE),
        agg -> agg.fixedInterval(new DateHistogramInterval("365d")).field(AGGREGABLE_DATE)
    )) {
        testSearchCase(
            new MatchNoDocsQuery(),
            DATASET,
            configure,
            histogram -> assertEquals(0, histogram.getBuckets().size()),
            false
        );
    }
}
/**
 * A match-all query buckets every document: 8 yearly buckets over DATASET (including the
 * empty 2011 and 2014 ones), or 6 once {@code minDocCount(1)} prunes the empty buckets.
 */
public void testMatchAllDocs() throws IOException {
    Query query = new MatchAllDocsQuery();

    // 1000 dates drawn at random (with replacement) from DATASET, ordered by descending count.
    List<String> randomDates = new ArrayList<>();
    for (int i = 0; i < 1000; i++) {
        randomDates.add(DATASET.get(randomIntBetween(0, DATASET.size() - 1)));
    }
    testSearchCase(
        query,
        randomDates,
        aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d"))
            .field(AGGREGABLE_DATE)
            .order(BucketOrder.count(false)),
        histogram -> assertEquals(8, histogram.getBuckets().size()),
        false
    );

    // Calendar and fixed intervals must bucket DATASET the same way, with and without
    // minDocCount pruning.
    List<Consumer<DateHistogramAggregationBuilder>> intervalChoices = List.of(
        agg -> agg.calendarInterval(DateHistogramInterval.YEAR),
        agg -> agg.fixedInterval(new DateHistogramInterval("365d"))
    );
    for (Consumer<DateHistogramAggregationBuilder> choose : intervalChoices) {
        testSearchCase(query, DATASET, aggregation -> {
            choose.accept(aggregation);
            aggregation.field(AGGREGABLE_DATE);
        }, histogram -> assertEquals(8, histogram.getBuckets().size()), false);
        testSearchCase(query, DATASET, aggregation -> {
            choose.accept(aggregation);
            aggregation.field(AGGREGABLE_DATE).minDocCount(1L);
        }, histogram -> assertEquals(6, histogram.getBuckets().size()), false);
    }
}
/**
 * Runs the yearly date histogram as a sub-aggregation of one and then two levels of
 * terms aggregations. The indexed fixture comes from {@code asSubAggTestCase} in the
 * shared test base class — assumptions about which terms hold which years are based on
 * that fixture.
 */
public void testAsSubAgg() throws IOException {
    AggregationBuilder builder = new TermsAggregationBuilder("k1").field("k1")
        .subAggregation(new DateHistogramAggregationBuilder("dh").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR));
    asSubAggTestCase(builder, (StringTerms terms) -> {
        // Term "a" spans 2020 and 2021; term "b" only 2020.
        StringTerms.Bucket a = terms.getBucketByKey("a");
        InternalDateHistogram adh = a.getAggregations().get("dh");
        assertThat(
            adh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()),
            equalTo(List.of("2020-01-01T00:00Z", "2021-01-01T00:00Z"))
        );
        StringTerms.Bucket b = terms.getBucketByKey("b");
        InternalDateHistogram bdh = b.getAggregations().get("dh");
        assertThat(
            bdh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()),
            equalTo(List.of("2020-01-01T00:00Z"))
        );
    });
    // Nest the whole thing one level deeper under a second terms aggregation on "k2" and
    // verify the histograms under each (k2, k1) bucket pair.
    builder = new TermsAggregationBuilder("k2").field("k2").subAggregation(builder);
    asSubAggTestCase(builder, (StringTerms terms) -> {
        StringTerms.Bucket a = terms.getBucketByKey("a");
        StringTerms ak1 = a.getAggregations().get("k1");
        StringTerms.Bucket ak1a = ak1.getBucketByKey("a");
        InternalDateHistogram ak1adh = ak1a.getAggregations().get("dh");
        assertThat(
            ak1adh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()),
            equalTo(List.of("2020-01-01T00:00Z", "2021-01-01T00:00Z"))
        );
        StringTerms.Bucket b = terms.getBucketByKey("b");
        StringTerms bk1 = b.getAggregations().get("k1");
        StringTerms.Bucket bk1a = bk1.getBucketByKey("a");
        InternalDateHistogram bk1adh = bk1a.getAggregations().get("dh");
        assertThat(
            bk1adh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()),
            equalTo(List.of("2021-01-01T00:00Z"))
        );
        StringTerms.Bucket bk1b = bk1.getBucketByKey("b");
        InternalDateHistogram bk1bdh = bk1b.getAggregations().get("dh");
        assertThat(
            bk1bdh.getBuckets().stream().map(bucket -> bucket.getKey().toString()).collect(toList()),
            equalTo(List.of("2020-01-01T00:00Z"))
        );
    });
}
/**
 * With no documents indexed there is nothing to bucket, whatever the interval type.
 */
public void testNoDocs() throws IOException {
    Query query = new MatchNoDocsQuery();
    List<String> dates = Collections.emptyList();
    Consumer<DateHistogramAggregationBuilder> aggregation = agg -> agg.calendarInterval(DateHistogramInterval.YEAR)
        .field(AGGREGABLE_DATE);
    // Each case used to run twice via an identical duplicated call (an apparent leftover
    // from a removed search-and-reduce variant); running it once covers the same behavior.
    testSearchCase(query, dates, aggregation, histogram -> assertEquals(0, histogram.getBuckets().size()), false);
    aggregation = agg -> agg.fixedInterval(new DateHistogramInterval("365d")).field(AGGREGABLE_DATE);
    testSearchCase(query, dates, aggregation, histogram -> assertEquals(0, histogram.getBuckets().size()), false);
}
/**
 * Targeting a field that is never indexed yields an empty histogram rather than an error.
 */
public void testAggregateWrongField() throws IOException {
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.YEAR).field("wrong_field"),
        agg -> agg.fixedInterval(new DateHistogramInterval("365d")).field("wrong_field")
    )) {
        testSearchCase(
            new MatchAllDocsQuery(),
            DATASET,
            configure,
            histogram -> assertEquals(0, histogram.getBuckets().size()),
            false
        );
    }
}
/**
 * Restricting the search to [2015-01-01, 2017-12-31] leaves only the last five DATASET
 * docs, spread over three yearly buckets.
 */
public void testIntervalYear() throws IOException {
    Query rangeQuery = LongPoint.newRangeQuery(SEARCHABLE_DATE, asLong("2015-01-01"), asLong("2017-12-31"));
    testSearchCase(
        rangeQuery,
        DATASET,
        aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(AGGREGABLE_DATE),
        histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            String[] expectedKeys = { "2015-01-01T00:00:00.000Z", "2016-01-01T00:00:00.000Z", "2017-01-01T00:00:00.000Z" };
            long[] expectedCounts = { 3, 1, 1 };
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        },
        false
    );
}
/**
 * One document in January, two in February, three in March — one monthly bucket each.
 */
public void testIntervalMonth() throws IOException {
    testSearchCase(
        new MatchAllDocsQuery(),
        Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"),
        aggregation -> aggregation.calendarInterval(DateHistogramInterval.MONTH).field(AGGREGABLE_DATE),
        histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            String[] expectedKeys = { "2017-01-01T00:00:00.000Z", "2017-02-01T00:00:00.000Z", "2017-03-01T00:00:00.000Z" };
            long[] expectedCounts = { 1, 2, 3 };
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        },
        false
    );
}
/**
 * A calendar DAY and a fixed 24h interval must bucket the same dataset identically.
 * The empty 2017-02-04 bucket is suppressed by {@code minDocCount(1)}.
 */
public void testIntervalDay() throws IOException {
    List<String> datedDocs = Arrays.asList(
        "2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"
    );
    String[] expectedKeys = {
        "2017-02-01T00:00:00.000Z",
        "2017-02-02T00:00:00.000Z",
        "2017-02-03T00:00:00.000Z",
        "2017-02-05T00:00:00.000Z" };
    long[] expectedCounts = { 1, 2, 3, 1 };
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE).minDocCount(1L),
        agg -> agg.fixedInterval(new DateHistogramInterval("24h")).field(AGGREGABLE_DATE).minDocCount(1L)
    )) {
        testSearchCase(new MatchAllDocsQuery(), datedDocs, configure, histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        }, false);
    }
}
/**
 * A calendar HOUR and a fixed "60m" interval bucket identically. The empty 11:00 and
 * 12:00 hours are dropped by {@code minDocCount(1)}.
 */
public void testIntervalHour() throws IOException {
    List<String> times = Arrays.asList(
        "2017-02-01T09:02:00.000Z",
        "2017-02-01T09:35:00.000Z",
        "2017-02-01T10:15:00.000Z",
        "2017-02-01T13:06:00.000Z",
        "2017-02-01T14:04:00.000Z",
        "2017-02-01T14:05:00.000Z",
        "2017-02-01T15:59:00.000Z",
        "2017-02-01T16:06:00.000Z",
        "2017-02-01T16:48:00.000Z",
        "2017-02-01T16:59:00.000Z"
    );
    String[] expectedKeys = {
        "2017-02-01T09:00:00.000Z",
        "2017-02-01T10:00:00.000Z",
        "2017-02-01T13:00:00.000Z",
        "2017-02-01T14:00:00.000Z",
        "2017-02-01T15:00:00.000Z",
        "2017-02-01T16:00:00.000Z" };
    long[] expectedCounts = { 2, 1, 1, 2, 1, 3 };
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.HOUR).field(AGGREGABLE_DATE).minDocCount(1L),
        agg -> agg.fixedInterval(new DateHistogramInterval("60m")).field(AGGREGABLE_DATE).minDocCount(1L)
    )) {
        testSearchCase(new MatchAllDocsQuery(), times, configure, histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        }, false);
    }
}
/**
 * A calendar MINUTE and a fixed "60s" interval bucket identically; minutes with no
 * documents are dropped by {@code minDocCount(1)}.
 */
public void testIntervalMinute() throws IOException {
    List<String> times = Arrays.asList(
        "2017-02-01T09:02:35.000Z",
        "2017-02-01T09:02:59.000Z",
        "2017-02-01T09:15:37.000Z",
        "2017-02-01T09:16:04.000Z",
        "2017-02-01T09:16:42.000Z"
    );
    String[] expectedKeys = { "2017-02-01T09:02:00.000Z", "2017-02-01T09:15:00.000Z", "2017-02-01T09:16:00.000Z" };
    long[] expectedCounts = { 2, 1, 2 };
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.MINUTE).field(AGGREGABLE_DATE).minDocCount(1L),
        agg -> agg.fixedInterval(new DateHistogramInterval("60s")).field(AGGREGABLE_DATE).minDocCount(1L)
    )) {
        testSearchCase(new MatchAllDocsQuery(), times, configure, histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        }, false);
    }
}
/**
 * A calendar SECOND and a fixed "1000ms" interval bucket millisecond-resolution
 * timestamps identically.
 */
public void testIntervalSecond() throws IOException {
    List<String> times = Arrays.asList(
        "2017-02-01T00:00:05.015Z",
        "2017-02-01T00:00:11.299Z",
        "2017-02-01T00:00:11.074Z",
        "2017-02-01T00:00:37.688Z",
        "2017-02-01T00:00:37.210Z",
        "2017-02-01T00:00:37.380Z"
    );
    String[] expectedKeys = { "2017-02-01T00:00:05.000Z", "2017-02-01T00:00:11.000Z", "2017-02-01T00:00:37.000Z" };
    long[] expectedCounts = { 1, 2, 3 };
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.SECOND).field(AGGREGABLE_DATE).minDocCount(1L),
        agg -> agg.fixedInterval(new DateHistogramInterval("1000ms")).field(AGGREGABLE_DATE).minDocCount(1L)
    )) {
        testSearchCase(new MatchAllDocsQuery(), times, configure, histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        }, false);
    }
}
/**
 * Same second-granularity bucketing as {@link #testIntervalSecond()} but against
 * nanosecond-resolution dates (last argument {@code true}).
 */
public void testNanosIntervalSecond() throws IOException {
    List<String> times = Arrays.asList(
        "2017-02-01T00:00:05.015298384Z",
        "2017-02-01T00:00:11.299954583Z",
        "2017-02-01T00:00:11.074986434Z",
        "2017-02-01T00:00:37.688314602Z",
        "2017-02-01T00:00:37.210328172Z",
        "2017-02-01T00:00:37.380889483Z"
    );
    String[] expectedKeys = { "2017-02-01T00:00:05.000Z", "2017-02-01T00:00:11.000Z", "2017-02-01T00:00:37.000Z" };
    long[] expectedCounts = { 1, 2, 3 };
    for (Consumer<DateHistogramAggregationBuilder> configure : List.<Consumer<DateHistogramAggregationBuilder>>of(
        agg -> agg.calendarInterval(DateHistogramInterval.SECOND).field(AGGREGABLE_DATE).minDocCount(1L),
        agg -> agg.fixedInterval(new DateHistogramInterval("1000ms")).field(AGGREGABLE_DATE).minDocCount(1L)
    )) {
        testSearchCase(new MatchAllDocsQuery(), times, configure, histogram -> {
            List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
            assertEquals(expectedKeys.length, buckets.size());
            for (int i = 0; i < expectedKeys.length; i++) {
                assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
                assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
            }
        }, true);
    }
}
/**
 * A self-contradictory filter — MUST(date &gt;= 2005) AND MUST(date &lt;= 2002-09-26) —
 * can never match, so the histogram must be empty.
 */
public void testIntervalEmpty() throws IOException {
    BooleanQuery.Builder contradictoryFilter = new BooleanQuery.Builder();
    contradictoryFilter.add(LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2005"), Long.MAX_VALUE), BooleanClause.Occur.MUST);
    contradictoryFilter.add(LongPoint.newRangeQuery(AGGREGABLE_DATE, Long.MIN_VALUE, asLong("2002-09-26")), BooleanClause.Occur.MUST);
    testSearchCase(
        contradictoryFilter.build(),
        DATASET,
        aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(AGGREGABLE_DATE),
        histogram -> assertEquals(0, histogram.getBuckets().size()),
        false
    );
}
/**
 * {@code minDocCount} controls which 5-second buckets survive: 0 keeps the empty
 * 00:00:15 bucket in the middle of the range, 3 keeps only the densest bucket.
 */
public void testMinDocCount() throws IOException {
    Query query = LongPoint.newRangeQuery(SEARCHABLE_DATE, asLong("2017-02-01T00:00:00.000Z"), asLong("2017-02-01T00:00:30.000Z"));
    List<String> timestamps = Arrays.asList(
        "2017-02-01T00:00:05.015Z",
        "2017-02-01T00:00:11.299Z",
        "2017-02-01T00:00:11.074Z",
        "2017-02-01T00:00:13.688Z",
        "2017-02-01T00:00:21.380Z"
    );
    // minDocCount = 0: the empty bucket is retained.
    testSearchCase(query, timestamps, aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(AGGREGABLE_DATE).minDocCount(0L), histogram -> {
        List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
        String[] expectedKeys = {
            "2017-02-01T00:00:05.000Z",
            "2017-02-01T00:00:10.000Z",
            "2017-02-01T00:00:15.000Z",
            "2017-02-01T00:00:20.000Z" };
        long[] expectedCounts = { 1, 3, 0, 1 };
        assertEquals(expectedKeys.length, buckets.size());
        for (int i = 0; i < expectedKeys.length; i++) {
            assertEquals(expectedKeys[i], buckets.get(i).getKeyAsString());
            assertEquals(expectedCounts[i], buckets.get(i).getDocCount());
        }
    }, false);
    // minDocCount = 3: only the bucket with three docs remains.
    testSearchCase(query, timestamps, aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(AGGREGABLE_DATE).minDocCount(3L), histogram -> {
        List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
        assertEquals(1, buckets.size());
        assertEquals("2017-02-01T00:00:10.000Z", buckets.get(0).getKeyAsString());
        assertEquals(3, buckets.get(0).getDocCount());
    }, false);
}
/**
 * Calendar units such as WEEK ("1w") are not valid fixed intervals and must be rejected.
 */
public void testFixedWithCalendar() throws IOException {
    List<String> days = Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05");
    IllegalArgumentException failure = expectThrows(
        IllegalArgumentException.class,
        () -> testSearchCase(
            new MatchAllDocsQuery(),
            days,
            aggregation -> aggregation.fixedInterval(DateHistogramInterval.WEEK).field(AGGREGABLE_DATE),
            histogram -> {},
            false
        )
    );
    String expectedMessage = "failed to parse setting [date_histogram.fixedInterval] with value [1w] as a time value: "
        + "unit is missing or unrecognized";
    assertThat(failure.getMessage(), equalTo(expectedMessage));
}
/**
 * "5d" is a valid fixed interval but not a calendar unit, so calendarInterval must reject it.
 */
public void testCalendarWithFixed() throws IOException {
    List<String> days = Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05");
    IllegalArgumentException failure = expectThrows(
        IllegalArgumentException.class,
        () -> testSearchCase(
            new MatchAllDocsQuery(),
            days,
            aggregation -> aggregation.calendarInterval(new DateHistogramInterval("5d")).field(AGGREGABLE_DATE),
            histogram -> {},
            false
        )
    );
    assertThat(failure.getMessage(), equalTo("The supplied interval [5d] could not be parsed as a calendar interval."));
}
/**
 * Setting a fixed interval after a calendar interval on the same builder is an error.
 */
public void testCalendarAndThenFixed() throws IOException {
    List<String> days = Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05");
    IllegalArgumentException failure = expectThrows(
        IllegalArgumentException.class,
        () -> testSearchCase(
            new MatchAllDocsQuery(),
            days,
            aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY)
                .fixedInterval(new DateHistogramInterval("2d"))
                .field(AGGREGABLE_DATE),
            histogram -> {},
            false
        )
    );
    assertThat(failure.getMessage(), equalTo("Cannot use [fixed_interval] with [calendar_interval] configuration option."));
}
/**
 * Setting a calendar interval after a fixed interval on the same builder is an error.
 */
public void testFixedAndThenCalendar() throws IOException {
    List<String> days = Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05");
    IllegalArgumentException failure = expectThrows(
        IllegalArgumentException.class,
        () -> testSearchCase(
            new MatchAllDocsQuery(),
            days,
            aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d"))
                .calendarInterval(DateHistogramInterval.DAY)
                .field(AGGREGABLE_DATE),
            histogram -> {},
            false
        )
    );
    assertThat(failure.getMessage(), equalTo("Cannot use [calendar_interval] with [fixed_interval] configuration option."));
}
/**
 * Extended bounds that stick out past the hard bounds are rejected when the aggregator
 * is built.
 */
public void testOverlappingBounds() {
    List<String> days = Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05");
    IllegalArgumentException failure = expectThrows(
        IllegalArgumentException.class,
        () -> testSearchCase(
            new MatchAllDocsQuery(),
            days,
            aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY)
                .hardBounds(new LongBounds("2010-01-01", "2020-01-01"))
                .extendedBounds(new LongBounds("2009-01-01", "2021-01-01"))
                .field(AGGREGABLE_DATE),
            histogram -> {},
            false
        )
    );
    String expectedMessage = "Extended bounds have to be inside hard bounds, "
        + "hard bounds: [2010-01-01--2020-01-01], extended bounds: [2009-01-01--2021-01-01]";
    assertThat(failure.getMessage(), equalTo(expectedMessage));
}
/**
 * With only ten yearly rounding points the aggregator is expected to choose the
 * date-range-backed implementation ({@code usesFromRange == true}).
 */
public void testFewRoundingPointsUsesFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(false, true, DateFormatter.forPattern("yyyy")),
        IntStream.range(2000, 2010).mapToObj(Integer::toString).collect(toList()),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR),
        true
    );
}
/**
 * A thousand yearly rounding points is too many for the range-based implementation,
 * so the aggregator must fall back ({@code usesFromRange == false}).
 */
public void testManyRoundingPointsDoesNotUseFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(false, true, DateFormatter.forPattern("yyyy")),
        IntStream.range(2000, 3000).mapToObj(Integer::toString).collect(toList()),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR),
        false
    );
}
/**
 * Nanosecond-resolution dates still use the from-range implementation (note the
 * {@code true} below, matching the method name), but we don't get the fancy compile
 * into filters because of potential loss of precision.
 */
public void testNanosDoesUseFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(true, true, DateFormatter.forPattern("yyyy")),
        List.of("2017", "2018"),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR),
        true
    );
}
/**
 * Dates in the very far future (year 402017) make the aggregator skip the
 * range-based implementation ({@code usesFromRange == false}).
 */
public void testFarFutureDoesNotUseFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(false, true, DateFormatter.forPattern("yyyyyy")),
        List.of("402017", "402018"),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR),
        false
    );
}
/**
 * Configuring a {@code missing} value disables the range-based implementation
 * ({@code usesFromRange == false}).
 */
public void testMissingValueDoesNotUseFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(false, true, DateFormatter.forPattern("yyyy")),
        List.of("2017", "2018"),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR).missing("2020"),
        false
    );
}
/**
 * Extended bounds widen the bucket keys (2016-2019) beyond the indexed data
 * (2017-2018) and are compatible with the range-based implementation.
 */
public void testExtendedBoundsUsesFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(false, true, DateFormatter.forPattern("yyyy")),
        List.of("2017", "2018"),
        List.of("2016", "2017", "2018", "2019"),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE)
            .calendarInterval(DateHistogramInterval.YEAR)
            .extendedBounds(new LongBounds("2016", "2019"))
            .minDocCount(0),
        true
    );
}
/**
 * Hard bounds trim the buckets (2017-2018) below the indexed data (2016-2019) and
 * are compatible with the range-based implementation.
 */
public void testHardBoundsUsesFromRange() throws IOException {
    aggregationImplementationChoiceTestCase(
        aggregableDateFieldType(false, true, DateFormatter.forPattern("yyyy")),
        List.of("2016", "2017", "2018", "2019"),
        List.of("2017", "2018"),
        new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE)
            .calendarInterval(DateHistogramInterval.YEAR)
            .hardBounds(new LongBounds("2017", "2019")),
        true
    );
}
/**
 * With every dated document falling into a single day-bucket, the aggregator should
 * delegate to the filter-by-filter implementation and count segments in constant time.
 */
public void testOneBucketOptimized() throws IOException {
    AggregationBuilder builder = new DateHistogramAggregationBuilder("d").field("f").calendarInterval(DateHistogramInterval.DAY);
    // Index DOCS_PER_RANGE_TO_USE_FILTERS dated docs inside one day, plus ten documents
    // without the field at all (they must not be counted by the exists-backed filter).
    CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> {
        long start = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-01T00:00:01");
        for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS; i++) {
            long date = start + i;
            iw.addDocument(List.of(new LongPoint("f", date), new NumericDocValuesField("f", date)));
        }
        for (int i = 0; i < 10; i++) {
            iw.addDocument(List.of());
        }
    };
    DateFieldMapper.DateFieldType ft = new DateFieldMapper.DateFieldType("f");
    // Exists queries convert to MatchNone if this isn't defined
    FieldNamesFieldMapper.FieldNamesFieldType fnft = FieldNamesFieldMapper.FieldNamesFieldType.get(true);
    debugTestCase(
        builder,
        new MatchAllDocsQuery(),
        buildIndex,
        (InternalDateHistogram result, Class<? extends Aggregator> impl, Map<String, Map<String, Object>> debug) -> {
            // One day-bucket holding every doc that has the field. The expected 5000L
            // implies DOCS_PER_RANGE_TO_USE_FILTERS == 5000 — TODO confirm against the constant.
            assertThat(result.getBuckets(), hasSize(1));
            assertThat(result.getBuckets().get(0).getKeyAsString(), equalTo("2020-01-01T00:00:00.000Z"));
            assertThat(result.getBuckets().get(0).getDocCount(), equalTo(5000L));
            assertThat(impl, equalTo(DateHistogramAggregator.FromDateRange.class));
            // Walk the delegate chain: FromDateRange -> RangeAggregator.FromFilters ->
            // FilterByFilterAggregator backed by a single exists query on "f".
            assertMap(
                debug,
                matchesMap().entry(
                    "d",
                    matchesMap().entry("delegate", "RangeAggregator.FromFilters")
                        .entry(
                            "delegate_debug",
                            matchesMap().entry("ranges", 1)
                                .entry("average_docs_per_range", 5010.0)
                                .entry("delegate", "FilterByFilterAggregator")
                                .entry(
                                    "delegate_debug",
                                    matchesMap().entry("segments_with_doc_count_field", 0)
                                        .entry("segments_with_deleted_docs", 0)
                                        .entry("segments_counted", greaterThan(0))
                                        .entry("segments_collected", 0)
                                        .entry(
                                            "filters",
                                            matchesList().item(
                                                matchesMap().entry("query", "FieldExistsQuery [field=f]")
                                                    .entry("segments_counted_in_constant_time", greaterThan(0))
                                            )
                                        )
                                )
                        )
                )
            );
        },
        ft,
        fnft
    );
}
/**
 * If there is a doc count field and a single bucket it is still
 * faster to use filter-by-filter collection mode so we use it.
 */
public void testOneBucketWithDocCountUsesFilterByFilter() throws IOException {
    AggregationBuilder builder = new DateHistogramAggregationBuilder("d").field("f").calendarInterval(DateHistogramInterval.DAY);
    // Every document carries a _doc_count of 2, so the reported bucket count is double
    // the number of Lucene documents.
    CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> {
        long start = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-01T00:00:01");
        for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS + 10; i++) {
            long date = start + i;
            iw.addDocument(List.of(new LongPoint("f", date), new NumericDocValuesField("f", date), DocCountFieldMapper.field(2)));
        }
    };
    DateFieldMapper.DateFieldType ft = new DateFieldMapper.DateFieldType("f");
    // Exists queries convert to MatchNone if this isn't defined
    FieldNamesFieldMapper.FieldNamesFieldType fnft = FieldNamesFieldMapper.FieldNamesFieldType.get(true);
    debugTestCase(
        builder,
        new MatchAllDocsQuery(),
        buildIndex,
        (InternalDateHistogram result, Class<? extends Aggregator> impl, Map<String, Map<String, Object>> debug) -> {
            // 10020L is consistent with (DOCS_PER_RANGE_TO_USE_FILTERS + 10) * 2 for a
            // constant of 5000 — TODO confirm against the constant.
            assertThat(result.getBuckets(), hasSize(1));
            assertThat(result.getBuckets().get(0).getKeyAsString(), equalTo("2020-01-01T00:00:00.000Z"));
            assertThat(result.getBuckets().get(0).getDocCount(), equalTo(10020L));
            assertThat(impl, equalTo(DateHistogramAggregator.FromDateRange.class));
            // With a doc-count field present, segments can't be counted in constant time
            // but filter-by-filter is still chosen for the single match-all filter.
            assertMap(
                debug,
                matchesMap().entry(
                    "d",
                    matchesMap().entry("delegate", "RangeAggregator.FromFilters")
                        .entry(
                            "delegate_debug",
                            matchesMap().entry("ranges", 1)
                                .entry("average_docs_per_range", 5010.0)
                                .entry("delegate", "FilterByFilterAggregator")
                                .entry(
                                    "delegate_debug",
                                    matchesMap().entry("segments_with_doc_count_field", greaterThan(0))
                                        .entry("segments_with_deleted_docs", 0)
                                        .entry("segments_counted", greaterThan(0))
                                        .entry("segments_collected", 0)
                                        .entry(
                                            "filters",
                                            matchesList().item(
                                                matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0)
                                            )
                                        )
                                )
                        )
                )
            );
        },
        ft,
        fnft
    );
}
/**
 * If there is a doc count field and more than one bucket it is
 * not faster to use filter-by-filter collection mode so we don't
 * use it.
 */
public void testTwoBucketsWithDocCountUsesTraditionalCollection() throws IOException {
    AggregationBuilder builder = new DateHistogramAggregationBuilder("d").field("f").calendarInterval(DateHistogramInterval.DAY);
    // Alternate documents between two days; each doc carries a _doc_count of 2 written
    // directly as a term-frequency field.
    CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> {
        long[] dates = new long[] {
            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-01T00:00:01"),
            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-02T00:00:01") };
        for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2 + 10; i++) {
            long date = dates[i % 2];
            iw.addDocument(
                List.of(
                    new LongPoint("f", date),
                    new NumericDocValuesField("f", date),
                    new CustomTermFreqField(DocCountFieldMapper.NAME, DocCountFieldMapper.NAME, 2)
                )
            );
        }
    };
    DateFieldMapper.DateFieldType ft = new DateFieldMapper.DateFieldType("f");
    // Exists queries convert to MatchNone if this isn't defined
    FieldNamesFieldMapper.FieldNamesFieldType fnft = FieldNamesFieldMapper.FieldNamesFieldType.get(true);
    debugTestCase(
        builder,
        new MatchAllDocsQuery(),
        buildIndex,
        (InternalDateHistogram result, Class<? extends Aggregator> impl, Map<String, Map<String, Object>> debug) -> {
            // Two equal-sized day buckets; counts are doubled by the doc-count field.
            assertThat(result.getBuckets(), hasSize(2));
            assertThat(result.getBuckets().get(0).getKeyAsString(), equalTo("2020-01-01T00:00:00.000Z"));
            assertThat(result.getBuckets().get(0).getDocCount(), equalTo(10010L));
            assertThat(result.getBuckets().get(1).getKeyAsString(), equalTo("2020-01-02T00:00:00.000Z"));
            assertThat(result.getBuckets().get(1).getDocCount(), equalTo(10010L));
            assertThat(impl, equalTo(DateHistogramAggregator.FromDateRange.class));
            // Traditional collection: the delegate is NoOverlap, not FilterByFilter.
            assertMap(
                debug,
                matchesMap().entry(
                    "d",
                    matchesMap().entry("delegate", "RangeAggregator.NoOverlap")
                        .entry(
                            "delegate_debug",
                            matchesMap().entry("ranges", 2)
                                .entry("average_docs_per_range", 5005.0)
                                .entry("singletons", greaterThanOrEqualTo(1))
                                .entry("non-singletons", 0)
                        )
                )
            );
        },
        ft,
        fnft
    );
}
private void aggregationImplementationChoiceTestCase(
DateFieldMapper.DateFieldType ft,
List<String> data,
DateHistogramAggregationBuilder builder,
boolean usesFromRange
) throws IOException {
aggregationImplementationChoiceTestCase(ft, data, data, builder, usesFromRange);
}
    /**
     * Indexes {@code data} as both doc values and points, builds the aggregator for
     * {@code builder}, and verifies both which implementation was chosen and the bucket
     * keys produced after a final reduce.
     *
     * @param ft field type of the aggregated date field
     * @param data date strings to index
     * @param resultingBucketKeys bucket keys expected after the final reduce
     * @param builder the date_histogram to run
     * @param usesFromRange whether the {@code DateHistogramAggregator.FromDateRange}
     *     optimization is expected to be selected
     */
    private void aggregationImplementationChoiceTestCase(
        DateFieldMapper.DateFieldType ft,
        List<String> data,
        List<String> resultingBucketKeys,
        DateHistogramAggregationBuilder builder,
        boolean usesFromRange
    ) throws IOException {
        try (Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            for (String d : data) {
                long instant = asLong(d, ft);
                indexWriter.addDocument(
                    List.of(new SortedNumericDocValuesField(AGGREGABLE_DATE, instant), new LongPoint(AGGREGABLE_DATE, instant))
                );
            }
            try (
                IndexReader reader = indexWriter.getReader();
                AggregationContext context = createAggregationContext(reader, new MatchAllDocsQuery(), ft)
            ) {
                Aggregator agg = createAggregator(builder, context);
                Matcher<Aggregator> matcher = instanceOf(DateHistogramAggregator.FromDateRange.class);
                if (usesFromRange == false) {
                    matcher = not(matcher);
                }
                assertThat(agg, matcher);
                // Also collect and reduce so we check the chosen implementation produces the
                // expected buckets, not merely that it was selected.
                agg.preCollection();
                context.searcher().search(context.query(), agg.asCollector());
                InternalDateHistogram result = (InternalDateHistogram) agg.buildTopLevel();
                result = (InternalDateHistogram) InternalAggregationTestCase.reduce(
                    List.of(result),
                    new AggregationReduceContext.ForFinal(
                        context.bigArrays(),
                        null,
                        () -> false,
                        builder,
                        new MultiBucketConsumerService.MultiBucketConsumer(context.maxBuckets(), context.breaker()),
                        PipelineTree.EMPTY
                    )
                );
                assertThat(
                    result.getBuckets().stream().map(InternalDateHistogram.Bucket::getKeyAsString).collect(toList()),
                    equalTo(resultingBucketKeys)
                );
            }
        }
    }
    /**
     * Both interval flavours reject unparseable strings: calendar intervals fail with a
     * dedicated message, fixed intervals fail inside time-value parsing.
     */
    public void testIllegalInterval() throws IOException {
        IllegalArgumentException e = expectThrows(
            IllegalArgumentException.class,
            () -> testSearchCase(
                new MatchAllDocsQuery(),
                Collections.emptyList(),
                aggregation -> aggregation.calendarInterval(new DateHistogramInterval("foobar")).field(AGGREGABLE_DATE),
                histogram -> {},
                false
            )
        );
        assertThat(e.getMessage(), equalTo("The supplied interval [foobar] could not be parsed as a calendar interval."));
        e = expectThrows(
            IllegalArgumentException.class,
            () -> testSearchCase(
                new MatchAllDocsQuery(),
                Collections.emptyList(),
                aggregation -> aggregation.fixedInterval(new DateHistogramInterval("foobar")).field(AGGREGABLE_DATE),
                histogram -> {},
                false
            )
        );
        assertThat(
            e.getMessage(),
            equalTo(
                "failed to parse setting [date_histogram.fixedInterval] with value [foobar] as a time value:"
                    + " unit is missing or unrecognized"
            )
        );
    }
    /**
     * Builds the empty aggregation for an offset date_histogram and inspects the rounding
     * stored in its {@code emptyBucketInfo}.
     */
    public void testBuildEmpty() throws IOException {
        withAggregator(
            new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR).offset(10),
            new MatchAllDocsQuery(),
            iw -> {},
            (reader, aggregator) -> {
                InternalDateHistogram histo = (InternalDateHistogram) aggregator.buildEmptyAggregation();
                /*
                 * There was a time where we included the offset in the
                 * rounding in the emptyBucketInfo which would cause us to
                 * include the offset twice. This verifies that we don't do
                 * that.
                 */
                assertThat(histo.emptyBucketInfo.rounding.prepareForUnknown().round(0), equalTo(0L));
            },
            aggregableDateFieldType(false, true)
        );
    }
private void testSearchCase(
Query query,
List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify,
boolean useNanosecondResolution
) throws IOException {
testSearchCase(query, dataset, configure, verify, 10000, useNanosecondResolution);
}
    /**
     * Indexes {@code dataset}, applies {@code configure} to a fresh date_histogram builder,
     * runs the search-and-reduce cycle with the given {@code maxBucket} limit, and hands the
     * reduced histogram to {@code verify}.
     */
    private void testSearchCase(
        Query query,
        List<String> dataset,
        Consumer<DateHistogramAggregationBuilder> configure,
        Consumer<InternalDateHistogram> verify,
        int maxBucket,
        boolean useNanosecondResolution
    ) throws IOException {
        // Randomize whether the aggregable field is also indexed so both the doc-values-only
        // and points-backed code paths get coverage.
        boolean aggregableDateIsSearchable = randomBoolean();
        DateFieldMapper.DateFieldType fieldType = aggregableDateFieldType(useNanosecondResolution, aggregableDateIsSearchable);
        try (Directory directory = newDirectory()) {
            try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
                Document document = new Document();
                for (String date : dataset) {
                    long instant = asLong(date, fieldType);
                    document.add(new SortedNumericDocValuesField(AGGREGABLE_DATE, instant));
                    if (aggregableDateIsSearchable) {
                        document.add(new LongPoint(AGGREGABLE_DATE, instant));
                    }
                    // SEARCHABLE_DATE is always indexed so queries can still match.
                    document.add(new LongPoint(SEARCHABLE_DATE, instant));
                    indexWriter.addDocument(document);
                    document.clear();
                }
            }
            try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
                DateHistogramAggregationBuilder aggregationBuilder = new DateHistogramAggregationBuilder("_name");
                if (configure != null) {
                    configure.accept(aggregationBuilder);
                }
                InternalDateHistogram histogram = searchAndReduce(
                    indexReader,
                    new AggTestConfig(aggregationBuilder, fieldType).withMaxBuckets(maxBucket).withQuery(query)
                );
                verify.accept(histogram);
            }
        }
    }
    /**
     * Parses {@code dateTime} with the default date formatter and converts it to epoch millis.
     */
    private static long asLong(String dateTime) {
        return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
    }

    /**
     * Parses {@code dateTime} through the field type's own parser so nanosecond-resolution
     * fields resolve to the correct underlying long value.
     */
    private static long asLong(String dateTime, DateFieldMapper.DateFieldType fieldType) {
        return fieldType.parse(dateTime);
    }
}
|
DateHistogramAggregatorTests
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java
|
{
"start": 877,
"end": 1763
}
|
class ____ extends ActionResponse {
private final PersistentTask<?> task;
public PersistentTaskResponse(StreamInput in) throws IOException {
task = in.readOptionalWriteable(PersistentTask::new);
}
public PersistentTaskResponse(PersistentTask<?> task) {
this.task = task;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(task);
}
public PersistentTask<?> getTask() {
return task;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PersistentTaskResponse that = (PersistentTaskResponse) o;
return Objects.equals(task, that.task);
}
@Override
public int hashCode() {
return Objects.hash(task);
}
}
|
PersistentTaskResponse
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/secondarytable/Record.java
|
{
"start": 761,
"end": 1005
}
|
// Entity whose columns are split between the primary table and two secondary tables.
class ____ {
    @Id @GeneratedValue(generator = "RecordSeq") long id;
    // Lives in the primary table.
    String name;
    // Mapped to the "NonOptional" secondary table.
    @Column(table = "NonOptional") String text;
    @Column(table = "NonOptional") boolean enabled;
    // Backtick-quoted so the identifier is escaped ("comment" can be a reserved word).
    @Column(table = "Optional", name="`comment`") String comment;
}
|
Record
|
java
|
greenrobot__EventBus
|
EventBus/src/org/greenrobot/eventbus/util/AsyncExecutor.java
|
{
"start": 1554,
"end": 2843
}
|
class ____ {
private Executor threadPool;
private Class<?> failureEventType;
private EventBus eventBus;
private Builder() {
}
public Builder threadPool(Executor threadPool) {
this.threadPool = threadPool;
return this;
}
public Builder failureEventType(Class<?> failureEventType) {
this.failureEventType = failureEventType;
return this;
}
public Builder eventBus(EventBus eventBus) {
this.eventBus = eventBus;
return this;
}
public AsyncExecutor build() {
return buildForScope(null);
}
public AsyncExecutor buildForScope(Object executionContext) {
if (eventBus == null) {
eventBus = EventBus.getDefault();
}
if (threadPool == null) {
threadPool = Executors.newCachedThreadPool();
}
if (failureEventType == null) {
failureEventType = ThrowableFailureEvent.class;
}
return new AsyncExecutor(threadPool, eventBus, failureEventType, executionContext);
}
}
/** Like {@link Runnable}, but the run method may throw an exception. */
public
|
Builder
|
java
|
google__error-prone
|
check_api/src/main/java/com/google/errorprone/util/ASTHelpers.java
|
{
"start": 56590,
"end": 57300
}
|
class ____ method that is marked as JUnit 3 or 4 test code.
*/
  /**
   * Returns true if the leaf of {@code state}'s path sits inside code that is recognizably
   * JUnit: a method with a JUnit annotation, a class descending from JUnit 3's
   * {@code TestCase}, or a class annotated with JUnit 4's {@code @RunWith}.
   */
  public static boolean isJUnitTestCode(VisitorState state) {
    // Walk every enclosing tree node from the leaf outward.
    for (Tree ancestor : state.getPath()) {
      if (ancestor instanceof MethodTree methodTree
          && JUnitMatchers.hasJUnitAnnotation(methodTree, state)) {
        return true;
      }
      if (ancestor instanceof ClassTree classTree
          && (JUnitMatchers.isTestCaseDescendant.matches(classTree, state)
              || hasAnnotation(getSymbol(ancestor), JUNIT4_RUN_WITH_ANNOTATION, state))) {
        return true;
      }
    }
    return false;
  }
/**
* Returns true if the leaf node in the {@link TreePath} from {@code state} sits somewhere
* underneath a
|
or
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/masterreplica/NodeConnectionFactory.java
|
{
"start": 385,
"end": 868
}
|
/**
 * Factory for asynchronously opening connections to individual Redis nodes.
 */
interface ____ {

    /**
     * Connects to the Redis node identified by the given {@link RedisURI} with the given
     * {@link RedisCodec} asynchronously.
     *
     * @param codec must not be {@code null}.
     * @param redisURI must not be {@code null}.
     * @param <K> Key type.
     * @param <V> Value type.
     * @return a new {@link StatefulRedisConnection}
     */
    <K, V> CompletableFuture<StatefulRedisConnection<K, V>> connectToNodeAsync(RedisCodec<K, V> codec, RedisURI redisURI);
}
|
NodeConnectionFactory
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/type/BlobTypeHandler.java
|
{
"start": 910,
"end": 1930
}
|
/**
 * Type handler mapping {@code byte[]} parameters and results to SQL {@code BLOB} columns.
 */
class BlobTypeHandler extends BaseTypeHandler<byte[]> {

    public static final BlobTypeHandler INSTANCE = new BlobTypeHandler();

    @Override
    public void setNonNullParameter(PreparedStatement ps, int i, byte[] parameter, JdbcType jdbcType)
            throws SQLException {
        // Stream the bytes rather than building a client-side Blob.
        ByteArrayInputStream bis = new ByteArrayInputStream(parameter);
        ps.setBinaryStream(i, bis, parameter.length);
    }

    @Override
    public byte[] getNullableResult(ResultSet rs, String columnName) throws SQLException {
        return toPrimitiveBytes(rs.getBlob(columnName));
    }

    @Override
    public byte[] getNullableResult(ResultSet rs, int columnIndex) throws SQLException {
        return toPrimitiveBytes(rs.getBlob(columnIndex));
    }

    @Override
    public byte[] getNullableResult(CallableStatement cs, int columnIndex) throws SQLException {
        return toPrimitiveBytes(cs.getBlob(columnIndex));
    }

    /** Extracts the full contents of {@code blob}, or returns {@code null} for SQL NULL. */
    private byte[] toPrimitiveBytes(Blob blob) throws SQLException {
        // Blob offsets are 1-based per the JDBC specification.
        return blob == null ? null : blob.getBytes(1, (int) blob.length());
    }
}
|
BlobTypeHandler
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/DoNotMockCheckerTest.java
|
{
"start": 10777,
"end": 10913
}
|
class ____ {",
" @DoNotMock(\"" + DO_NOT_MOCK_REASON + "\")",
" @AutoValue public abstract static
|
AutoValueObjects
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailAction.java
|
{
"start": 8579,
"end": 8725
}
|
class ____ extends Action.Result {
protected Result(Status status) {
super(TYPE, status);
}
public static
|
Result
|
java
|
apache__flink
|
flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/legacy/table/sources/InputFormatTableSource.java
|
{
"start": 1752,
"end": 2435
}
|
/**
 * Base class for {@link StreamTableSource}s backed by an {@link InputFormat}; always bounded.
 */
class ____<T> implements StreamTableSource<T> {

    /** Returns an {@link InputFormat} for reading the data of the table. */
    public abstract InputFormat<T, ?> getInputFormat();

    /** Always returns true which indicates this is a bounded source. */
    @Override
    public final boolean isBounded() {
        return true;
    }

    @SuppressWarnings("unchecked")
    @Override
    public final DataStream<T> getDataStream(StreamExecutionEnvironment execEnv) {
        // Bridge the produced DataType to the legacy TypeInformation the DataStream API
        // expects; assumed to describe the same T as the input format produces.
        TypeInformation<T> typeInfo =
                (TypeInformation<T>) fromDataTypeToLegacyInfo(getProducedDataType());
        return execEnv.createInput(getInputFormat(), typeInfo);
    }
}
|
InputFormatTableSource
|
java
|
greenrobot__greendao
|
tests/DaoTest/src/androidTest/java/org/greenrobot/greendao/daotest/contentprovider/SimpleEntityContentProviderTest.java
|
{
"start": 1401,
"end": 2794
}
|
class ____ extends AbstractDaoSessionTest<DaoMaster, DaoSession> {
public SimpleEntityContentProviderTest() {
super(DaoMaster.class);
}
@Override
protected void setUp() throws Exception {
super.setUp();
SimpleEntityContentProvider.daoSession = daoSession;
}
public void testQuery() {
SimpleEntity entity = new SimpleEntity();
entity.setSimpleString("hello");
daoSession.insert(entity);
long id = entity.getId();
SimpleEntity entity2 = new SimpleEntity();
entity2.setSimpleString("content");
daoSession.insert(entity2);
long id2 = entity2.getId();
Cursor cursor = getContext().getContentResolver().query(SimpleEntityContentProvider.CONTENT_URI, null,
null, null, "_id");
assertEquals(2, cursor.getCount());
int idxId = cursor.getColumnIndexOrThrow(SimpleEntityDao.Properties.Id.columnName);
int idxString = cursor.getColumnIndexOrThrow(SimpleEntityDao.Properties.SimpleString.columnName);
assertTrue(cursor.moveToFirst());
assertEquals("hello", cursor.getString(idxString));
assertEquals(id, cursor.getLong(idxId));
assertTrue(cursor.moveToNext());
assertEquals("content", cursor.getString(idxString));
assertEquals(id2, cursor.getLong(idxId));
}
}
|
SimpleEntityContentProviderTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/restart/classloader/ClassLoaderFile.java
|
{
"start": 2738,
"end": 3027
}
|
enum ____ {
/**
* The file has been added since the original JAR was created.
*/
ADDED,
/**
* The file has been modified since the original JAR was created.
*/
MODIFIED,
/**
* The file has been deleted since the original JAR was created.
*/
DELETED
}
}
|
Kind
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/util/LambdaSafeTests.java
|
{
"start": 16903,
"end": 16973
}
|
interface ____ {
void handle(String argument);
}
|
NonGenericCallback
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/SlotNotActiveException.java
|
{
"start": 1064,
"end": 1363
}
|
class ____ extends Exception {
private static final long serialVersionUID = 4305837511564584L;
public SlotNotActiveException(JobID jobId, AllocationID allocationId) {
super("No active slot for job " + jobId + " with allocation id " + allocationId + '.');
}
}
|
SlotNotActiveException
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestMRCJCSocketFactory.java
|
{
"start": 1751,
"end": 6090
}
|
/**
 * Verifies that RPC clients honor a custom {@code SocketFactory} when talking to a
 * NameNode and a Resource Manager.
 */
class ____ {

  /**
   * Check that we can reach a NameNode or Resource Manager using a specific
   * socket factory
   */
  @Test
  public void testSocketFactory() throws IOException {
    // Create a standard mini-cluster
    Configuration sconf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).numDataNodes(1)
        .build();
    final int nameNodePort = cluster.getNameNodePort();

    // Get a reference to its DFS directly
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem directDfs = (DistributedFileSystem) fs;

    // Second reference routed through the dummy socket factory, which subtracts 10 from
    // the port on connect; getCustomSocketConfigs adds 10 to compensate.
    Configuration cconf = getCustomSocketConfigs(nameNodePort);
    fs = FileSystem.get(cconf);
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    JobClient client = null;
    MiniMRYarnCluster miniMRYarnCluster = null;
    try {
      // This will test RPC to the NameNode only.
      // could we test Client-DataNode connections?
      Path filePath = new Path("/dir");

      assertFalse(directDfs.exists(filePath));
      assertFalse(dfs.exists(filePath));

      directDfs.mkdirs(filePath);
      assertTrue(directDfs.exists(filePath));
      assertTrue(dfs.exists(filePath));

      // This will test RPC to a Resource Manager
      fs = FileSystem.get(sconf);
      JobConf jobConf = new JobConf();
      FileSystem.setDefaultUri(jobConf, fs.getUri().toString());
      miniMRYarnCluster = initAndStartMiniMRYarnCluster(jobConf);
      JobConf jconf = new JobConf(miniMRYarnCluster.getConfig());
      jconf.set("hadoop.rpc.socket.factory.class.default",
                "org.apache.hadoop.ipc.DummySocketFactory");
      jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
      String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
      String[] split = rmAddress.split(":");
      // Shift the RM port up by 10 so the subtracting socket factory lands on the real one.
      jconf.set(YarnConfiguration.RM_ADDRESS, split[0] + ':'
          + (Integer.parseInt(split[1]) + 10));
      client = new JobClient(jconf);

      JobStatus[] jobs = client.jobsToComplete();
      assertTrue(jobs.length == 0);

    } finally {
      // Tear down in reverse order of creation; each helper is best-effort.
      closeClient(client);
      closeDfs(dfs);
      closeDfs(directDfs);
      stopMiniMRYarnCluster(miniMRYarnCluster);
      shutdownDFSCluster(cluster);
    }
  }

  /** Starts a single-node MiniMR YARN cluster with the given configuration. */
  private MiniMRYarnCluster initAndStartMiniMRYarnCluster(JobConf jobConf) {
    MiniMRYarnCluster miniMRYarnCluster;
    miniMRYarnCluster = new MiniMRYarnCluster(this.getClass().getName(), 1);
    miniMRYarnCluster.init(jobConf);
    miniMRYarnCluster.start();
    return miniMRYarnCluster;
  }

  /**
   * Builds a client configuration routing all RPC through the dummy socket factory;
   * ports are raised by 10 because the factory subtracts 10 when connecting.
   */
  private Configuration getCustomSocketConfigs(final int nameNodePort) {
    // Get another reference via network using a specific socket factory
    Configuration cconf = new Configuration();
    FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/",
        nameNodePort + 10));
    cconf.set("hadoop.rpc.socket.factory.class.default",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");
    return cconf;
  }

  /** Best-effort shutdown; failures are printed and swallowed so cleanup continues. */
  private void shutdownDFSCluster(MiniDFSCluster cluster) {
    try {
      if (cluster != null)
        cluster.shutdown();
    } catch (Exception ignored) {
      // nothing we can do
      ignored.printStackTrace();
    }
  }

  /** Best-effort stop; failures are printed and swallowed so cleanup continues. */
  private void stopMiniMRYarnCluster(MiniMRYarnCluster miniMRYarnCluster) {
    try {
      if (miniMRYarnCluster != null)
        miniMRYarnCluster.stop();
    } catch (Exception ignored) {
      // nothing we can do
      ignored.printStackTrace();
    }
  }

  /** Best-effort close; failures are printed and swallowed so cleanup continues. */
  private void closeDfs(DistributedFileSystem dfs) {
    try {
      if (dfs != null)
        dfs.close();
    } catch (Exception ignored) {
      // nothing we can do
      ignored.printStackTrace();
    }
  }

  /** Best-effort close; failures are printed and swallowed so cleanup continues. */
  private void closeClient(JobClient client) {
    try {
      if (client != null)
        client.close();
    } catch (Exception ignored) {
      // nothing we can do
      ignored.printStackTrace();
    }
  }
}
/**
* Dummy socket factory which shift TPC ports by subtracting 10 when
* establishing a connection
*/
|
TestMRCJCSocketFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/type/AbstractNamedEnumTest.java
|
{
"start": 3763,
"end": 3892
}
|
	// Entity whose enum-typed primary key is stored as a named ordinal enum column.
	class ____ {
		@Id
		@JdbcTypeCode(SqlTypes.NAMED_ORDINAL_ENUM)
		SkyType skyType;
	}
@Entity(name = "Weather")
public static
|
Sky
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/usertype/ParameterizedType.java
|
{
"start": 546,
"end": 732
}
|
/**
 * Contract for user types that can be configured with parameters declared in the mapping.
 */
interface ____ {
	/**
	 * Gets called by Hibernate to pass the configured type parameters to
	 * the implementation.
	 *
	 * @param parameters the parameters declared for this type in the mapping
	 */
	void setParameterValues(Properties parameters);
}
|
ParameterizedType
|
java
|
quarkusio__quarkus
|
extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/filters/accesslog/JBossLoggingAccessLogReceiver.java
|
{
"start": 907,
"end": 1425
}
|
class ____ implements AccessLogReceiver {
public static final String DEFAULT_CATEGORY = "io.quarkus.vertx.http.accesslog";
private final Logger logger;
public JBossLoggingAccessLogReceiver(final String category) {
this.logger = Logger.getLogger(category);
}
public JBossLoggingAccessLogReceiver() {
this.logger = Logger.getLogger(DEFAULT_CATEGORY);
}
@Override
public void logMessage(String message) {
logger.info(message);
}
}
|
JBossLoggingAccessLogReceiver
|
java
|
reactor__reactor-core
|
reactor-core/src/withMicrometerTest/java/reactor/core/publisher/FluxTapTest.java
|
{
"start": 30299,
"end": 35413
}
|
    /**
     * Tests around the fuseable tap operator ({@code MonoTapFuseable}): subscriber
     * implementation selection, listener-factory failures, and doFirst semantics.
     */
    class ____ {

        @Test
        void subscriberImplementationsFromFluxTapFuseable() {
            Mono<Integer> fuseableSource = Mono.just(1);
            assertThat(fuseableSource).as("smoke test fuseableSource").isInstanceOf(Fuseable.class);

            Mono<Integer> fuseable = fuseableSource.tap(TestSignalListener::new);
            assertThat(fuseable).as("fuseable").isInstanceOf(MonoTapFuseable.class);

            //the TestSubscriber "requireFusion" configuration below are intentionally inverted
            //so that an exception describing the actual Subscription is thrown when calling block()
            TestSubscriber<Integer> testSubscriberForFuseable = TestSubscriber.builder().requireNotFuseable().build();
            TestSubscriber<Integer> testSubscriberForFuseableConditional = TestSubscriber.builder().requireNotFuseable().buildConditional(i -> true);

            assertThatExceptionOfType(AssertionError.class)
                .isThrownBy(() -> fuseable.subscribeWith(testSubscriberForFuseable).block())
                .as("TapFuseableSubscriber")
                .withMessageContaining("got reactor.core.publisher.FluxTapFuseable$TapFuseableSubscriber");

            assertThatExceptionOfType(AssertionError.class)
                .isThrownBy(() -> fuseable.subscribeWith(testSubscriberForFuseableConditional).block())
                .as("TapFuseableConditionalSubscriber")
                .withMessageContaining("got reactor.core.publisher.FluxTapFuseable$TapConditionalFuseableSubscriber");
        }

        @Test
        void throwingCreateListener() {
            TestSubscriber<Integer> testSubscriber = TestSubscriber.create();
            // Factory whose createListener always throws: the error must surface downstream,
            // not be thrown from subscribeOrReturn.
            MonoTapFuseable<Integer, Void> test = new MonoTapFuseable<>(Mono.just(1),
                new SignalListenerFactory<Integer, Void>() {
                    @Override
                    public Void initializePublisherState(Publisher<? extends Integer> source) {
                        return null;
                    }

                    @Override
                    public SignalListener<Integer> createListener(Publisher<? extends Integer> source,
                        ContextView listenerContext, Void publisherContext) {
                        throw new IllegalStateException("expected");
                    }
                });

            assertThatCode(() -> test.subscribeOrReturn(testSubscriber))
                .doesNotThrowAnyException();

            assertThat(testSubscriber.expectTerminalError())
                .as("downstream error")
                .isInstanceOf(IllegalStateException.class)
                .hasMessage("expected");
        }

        //doFirst is invoked from each publisher's subscribeOrReturn
        @Test
        void doFirst() {
            TestSubscriber<Integer> testSubscriber = TestSubscriber.create();
            TestSignalListener<Integer> listener = new TestSignalListener<>();
            MonoTapFuseable<Integer, Void> test = new MonoTapFuseable<>(Mono.just(1), factoryOf(listener));

            assertThatCode(() -> test.subscribeOrReturn(testSubscriber))
                .doesNotThrowAnyException();

            assertThat(listener.listenerErrors).as("listenerErrors").isEmpty();
            assertThat(listener.events)
                .as("events")
                .containsExactly(
                    "doFirst"
                );
        }

        @Test
        void throwingAlterContext() {
            TestSubscriber<Integer> testSubscriber = TestSubscriber.create();
            TestSignalListener<Integer> testSignalListener =
                new TestSignalListener<Integer>() {
                    @Override
                    public Context addToContext(Context originalContext) {
                        throw new IllegalStateException("expected");
                    }
                };
            MonoTapFuseable<Integer, Void> test = new MonoTapFuseable<>(
                Mono.just(1), factoryOf(testSignalListener));

            assertThatCode(() -> test.subscribeOrReturn(testSubscriber))
                .doesNotThrowAnyException();

            // The failure is wrapped, surfaced downstream, and also recorded on the listener.
            assertThat(testSubscriber.expectTerminalError())
                .as("downstream error")
                .isInstanceOf(IllegalStateException.class)
                .hasMessage("Unable to augment tap Context at subscription via addToContext")
                .extracting(Throwable::getCause)
                .satisfies(t -> assertThat(t)
                    .isInstanceOf(IllegalStateException.class)
                    .hasMessage("expected"));

            assertThat(testSignalListener.listenerErrors)
                .as("listenerErrors")
                .satisfies(errors -> {
                    assertThat(errors.size()).isEqualTo(1);
                    assertThat(errors.stream().findFirst().get())
                        .isInstanceOf(IllegalStateException.class)
                        .hasMessage("Unable to augment tap Context at subscription via addToContext")
                        .extracting(Throwable::getCause)
                        .satisfies(t -> assertThat(t)
                            .isInstanceOf(IllegalStateException.class)
                            .hasMessage("expected"));
                });

            assertThat(testSignalListener.events)
                .containsExactly("doFirst");
        }

        @Test
        void doFirstListenerError() {
            Throwable listenerError = new IllegalStateException("expected from doFirst");
            TestSubscriber<Integer> testSubscriber = TestSubscriber.create();
            TestSignalListener<Integer> listener = new TestSignalListener<Integer>() {
                @Override
                public void doFirst() throws Throwable {
                    throw listenerError;
                }
            };
            MonoTapFuseable<Integer, Void> test = new MonoTapFuseable<>(Mono.just(1), factoryOf(listener));

            assertThatCode(() -> test.subscribeOrReturn(testSubscriber))
                .doesNotThrowAnyException();

            // When doFirst itself fails, the error is captured and no events are recorded.
            assertThat(listener.listenerErrors)
                .as("listenerErrors")
                .containsExactly(listenerError);
            assertThat(listener.events)
                .as("events")
                .isEmpty();
        }
    }
@Nested
|
MonoTapFuseableTest
|
java
|
resilience4j__resilience4j
|
resilience4j-retry/src/test/java/io/github/resilience4j/retry/internal/InMemoryRetryRegistryTest.java
|
{
"start": 1006,
"end": 2656
}
|
/**
 * Tests construction and configuration lookup of {@code InMemoryRetryRegistry}.
 */
class ____ {

    @Test
    public void shouldCreateRetryRegistryWithRegistryStore() {
        List<RegistryEventConsumer<Retry>> eventConsumers = new ArrayList<>();
        eventConsumers.add(getNoOpsRegistryEventConsumer());

        final RetryConfig defaultConfig = RetryConfig.ofDefaults();
        Map<String, RetryConfig> configsByName = new HashMap<>();
        configsByName.put("default", defaultConfig);

        final InMemoryRetryRegistry registry = new InMemoryRetryRegistry(
            configsByName, eventConsumers, Map.of("Tag1", "Tag1Value"), new InMemoryRegistryStore<>());

        assertThat(registry).isNotNull();
        assertThat(registry.getDefaultConfig()).isEqualTo(defaultConfig);
        assertThat(registry.getConfiguration("testNotFound")).isEmpty();

        registry.addConfiguration("testConfig", defaultConfig);
        assertThat(registry.getConfiguration("testConfig")).isNotNull();
    }

    /** Returns an event consumer whose callbacks deliberately do nothing. */
    private RegistryEventConsumer<Retry> getNoOpsRegistryEventConsumer() {
        return new RegistryEventConsumer<Retry>() {
            @Override
            public void onEntryAddedEvent(EntryAddedEvent<Retry> entryAddedEvent) {
                // no-op
            }

            @Override
            public void onEntryRemovedEvent(EntryRemovedEvent<Retry> entryRemoveEvent) {
                // no-op
            }

            @Override
            public void onEntryReplacedEvent(EntryReplacedEvent<Retry> entryReplacedEvent) {
                // no-op
            }
        };
    }
}
|
InMemoryRetryRegistryTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtUnsignedLongEvaluator.java
|
{
"start": 3989,
"end": 4582
}
|
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory val;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val) {
this.source = source;
this.val = val;
}
@Override
public CbrtUnsignedLongEvaluator get(DriverContext context) {
return new CbrtUnsignedLongEvaluator(source, val.get(context), context);
}
@Override
public String toString() {
return "CbrtUnsignedLongEvaluator[" + "val=" + val + "]";
}
}
}
|
Factory
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/config/plugins/convert/TypeConverters.java
|
{
"start": 3605,
"end": 4716
}
|
class ____ implements TypeConverter<byte[]> {

    private static final String PREFIX_0x = "0x";
    private static final String PREFIX_BASE64 = "Base64:";

    /**
     * Decodes {@code value} to bytes: {@code Base64:}-prefixed input is Base64-decoded,
     * {@code 0x}-prefixed input is hex-decoded, anything else is encoded with the default
     * charset. Null or empty input yields the shared empty array.
     */
    @Override
    public byte[] convert(final String value) {
        if (value == null || value.isEmpty()) {
            return Constants.EMPTY_BYTE_ARRAY;
        }
        if (value.startsWith(PREFIX_BASE64)) {
            return Base64Converter.parseBase64Binary(value.substring(PREFIX_BASE64.length()));
        }
        if (value.startsWith(PREFIX_0x)) {
            return HexConverter.parseHexBinary(value.substring(PREFIX_0x.length()));
        }
        return value.getBytes(Charset.defaultCharset());
    }
}
/**
* Converts a {@link String} into a {@link Byte}.
*/
@Plugin(name = "Byte", category = CATEGORY)
public static
|
ByteArrayConverter
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/mixins/MixinSerForFieldsTest.java
|
{
"start": 262,
"end": 456
}
|
/**
 * Simple two-field bean used as the serialization target in the mixin tests.
 */
class BeanClass {
    public String a;
    public String b;

    public BeanClass(String a, String b) {
        this.a = a;
        this.b = b;
    }
}
abstract
|
BeanClass
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableConcatWithMaybe.java
|
{
"start": 1829,
"end": 3347
}
|
class ____<T>
extends SinglePostCompleteSubscriber<T, T>
implements MaybeObserver<T> {
private static final long serialVersionUID = -7346385463600070225L;
final AtomicReference<Disposable> otherDisposable;
MaybeSource<? extends T> other;
boolean inMaybe;
ConcatWithSubscriber(Subscriber<? super T> actual, MaybeSource<? extends T> other) {
super(actual);
this.other = other;
this.otherDisposable = new AtomicReference<>();
}
@Override
public void onSubscribe(Disposable d) {
DisposableHelper.setOnce(otherDisposable, d);
}
@Override
public void onNext(T t) {
produced++;
downstream.onNext(t);
}
@Override
public void onError(Throwable t) {
downstream.onError(t);
}
@Override
public void onSuccess(T t) {
complete(t);
}
@Override
public void onComplete() {
if (inMaybe) {
downstream.onComplete();
} else {
inMaybe = true;
upstream = SubscriptionHelper.CANCELLED;
MaybeSource<? extends T> ms = other;
other = null;
ms.subscribe(this);
}
}
@Override
public void cancel() {
super.cancel();
DisposableHelper.dispose(otherDisposable);
}
}
}
|
ConcatWithSubscriber
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/common/state/v2/ReducingStateDescriptorTest.java
|
{
"start": 1183,
"end": 2343
}
|
/** Tests equals/hashCode contracts of {@code ReducingStateDescriptor}. */
class ____ {

    @Test
    void testHashCodeAndEquals() throws Exception {
        final String stateName = "testName";
        ReduceFunction<Integer> sumFunction = Integer::sum;

        ReducingStateDescriptor<Integer> descriptor =
                new ReducingStateDescriptor<>(stateName, sumFunction, BasicTypeInfo.INT_TYPE_INFO);
        ReducingStateDescriptor<Integer> equalDescriptor =
                new ReducingStateDescriptor<>(stateName, sumFunction, BasicTypeInfo.INT_TYPE_INFO);
        ReducingStateDescriptor<Integer> equalBySerializer =
                new ReducingStateDescriptor<>(stateName, sumFunction, BasicTypeInfo.INT_TYPE_INFO);

        // hashCode() must agree for descriptors with initialized and uninitialized serializers.
        assertThat(equalDescriptor).hasSameHashCodeAs(descriptor);
        assertThat(equalBySerializer).hasSameHashCodeAs(descriptor);

        assertThat(equalDescriptor).isEqualTo(descriptor);
        assertThat(equalBySerializer).isEqualTo(descriptor);

        // A serialized round-trip copy must remain equal as well.
        ReducingStateDescriptor<Integer> roundTripCopy = CommonTestUtils.createCopySerializable(descriptor);
        assertThat(roundTripCopy).isEqualTo(descriptor);
    }
}
|
ReducingStateDescriptorTest
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/util/BlockingIgnoringReceiver.java
|
{
"start": 823,
"end": 1178
}
|
class ____
extends CountDownLatch
implements Consumer<Throwable>, Action {
public Throwable error;
public BlockingIgnoringReceiver() {
super(1);
}
@Override
public void accept(Throwable e) {
error = e;
countDown();
}
@Override
public void run() {
countDown();
}
}
|
BlockingIgnoringReceiver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
|
{
"start": 14269,
"end": 14860
}
|
    /**
     * Translates {@code IS NOT NULL} predicates either into a Lucene query or, when the
     * predicate applies on top of aggregations, into an aggregation filter script.
     */
    class ____ extends SqlExpressionTranslator<IsNotNull> {
        @Override
        protected QueryTranslation asQuery(IsNotNull isNotNull, boolean onAggs, TranslatorHandler handler) {
            Query query = null;
            AggFilter aggFilter = null;

            if (onAggs) {
                // On aggregations the check must run as a script over the agg results.
                aggFilter = new AggFilter(id(isNotNull), isNotNull.asScript());
            } else {
                query = ExpressionTranslators.IsNotNulls.doTranslate(isNotNull, handler);
            }
            return new QueryTranslation(query, aggFilter);
        }
    }
static
|
IsNotNullTranslator
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectCluster.java
|
{
"start": 2643,
"end": 2853
}
|
class ____ provides various
* utility methods to perform actions on the Connect cluster such as connector creation, config validation, connector
* restarts, pause / resume, connector deletion etc.
*/
public
|
also
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java
|
{
"start": 1951,
"end": 5507
}
|
class ____ extends HandledTransportAction<
PostCalendarEventsAction.Request,
PostCalendarEventsAction.Response> {
private static final Logger logger = LogManager.getLogger(TransportPostCalendarEventsAction.class);
private final Client client;
private final JobResultsProvider jobResultsProvider;
private final JobManager jobManager;
@Inject
public TransportPostCalendarEventsAction(
TransportService transportService,
ActionFilters actionFilters,
Client client,
JobResultsProvider jobResultsProvider,
JobManager jobManager
) {
super(
PostCalendarEventsAction.NAME,
transportService,
actionFilters,
PostCalendarEventsAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.client = client;
this.jobResultsProvider = jobResultsProvider;
this.jobManager = jobManager;
}
@Override
protected void doExecute(
Task task,
PostCalendarEventsAction.Request request,
ActionListener<PostCalendarEventsAction.Response> listener
) {
List<ScheduledEvent> events = request.getScheduledEvents();
ActionListener<Calendar> calendarListener = ActionListener.wrap(calendar -> {
logger.debug(
"Calendar [{}] accepted for background update: {} jobs with {} events",
request.getCalendarId(),
calendar.getJobIds().size(),
events.size()
);
BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
for (ScheduledEvent event : events) {
IndexRequest indexRequest = new IndexRequest(MlMetaIndex.indexName());
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
indexRequest.source(
event.toXContent(
builder,
new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"))
)
);
} catch (IOException e) {
throw new IllegalStateException("Failed to serialise event", e);
}
bulkRequestBuilder.add(indexRequest);
}
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
executeAsyncWithOrigin(
client,
ML_ORIGIN,
TransportBulkAction.TYPE,
bulkRequestBuilder.request(),
new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
jobManager.updateProcessOnCalendarChanged(calendar.getJobIds(), ActionListener.wrap(r -> {
logger.debug("Calendar [{}] update initiated successfully", request.getCalendarId());
listener.onResponse(new PostCalendarEventsAction.Response(events));
}, listener::onFailure));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(ExceptionsHelper.serverError("Error indexing event", e));
}
}
);
}, listener::onFailure);
jobResultsProvider.calendar(request.getCalendarId(), calendarListener);
}
}
|
TransportPostCalendarEventsAction
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/annotation/MockInjectionUsingConstructorTest.java
|
{
"start": 6655,
"end": 6763
}
|
class ____ {}
@Test
public void injectMocksMustSucceedWithStaticInnerClass() {
|
StaticInnerClass
|
java
|
apache__camel
|
components/camel-ignite/src/generated/java/org/apache/camel/component/ignite/cache/IgniteCacheComponentConfigurer.java
|
{
"start": 739,
"end": 3776
}
|
class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
IgniteCacheComponent target = (IgniteCacheComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "configurationresource":
case "configurationResource": target.setConfigurationResource(property(camelContext, java.lang.Object.class, value)); return true;
case "ignite": target.setIgnite(property(camelContext, org.apache.ignite.Ignite.class, value)); return true;
case "igniteconfiguration":
case "igniteConfiguration": target.setIgniteConfiguration(property(camelContext, org.apache.ignite.configuration.IgniteConfiguration.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "configurationresource":
case "configurationResource": return java.lang.Object.class;
case "ignite": return org.apache.ignite.Ignite.class;
case "igniteconfiguration":
case "igniteConfiguration": return org.apache.ignite.configuration.IgniteConfiguration.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
IgniteCacheComponent target = (IgniteCacheComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "configurationresource":
case "configurationResource": return target.getConfigurationResource();
case "ignite": return target.getIgnite();
case "igniteconfiguration":
case "igniteConfiguration": return target.getIgniteConfiguration();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
default: return null;
}
}
}
|
IgniteCacheComponentConfigurer
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorErrorActionRequestBWCSerializingTests.java
|
{
"start": 564,
"end": 2259
}
|
class ____ extends AbstractBWCSerializationTestCase<
UpdateConnectorErrorAction.Request> {
private String connectorId;
@Override
protected Writeable.Reader<UpdateConnectorErrorAction.Request> instanceReader() {
return UpdateConnectorErrorAction.Request::new;
}
@Override
protected UpdateConnectorErrorAction.Request createTestInstance() {
this.connectorId = randomUUID();
return new UpdateConnectorErrorAction.Request(connectorId, randomAlphaOfLengthBetween(5, 15));
}
@Override
protected UpdateConnectorErrorAction.Request mutateInstance(UpdateConnectorErrorAction.Request instance) throws IOException {
String originalConnectorId = instance.getConnectorId();
String error = instance.getError();
switch (randomIntBetween(0, 1)) {
case 0 -> originalConnectorId = randomValueOtherThan(originalConnectorId, () -> randomUUID());
case 1 -> error = randomValueOtherThan(error, () -> randomAlphaOfLengthBetween(5, 15));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new UpdateConnectorErrorAction.Request(originalConnectorId, error);
}
@Override
protected UpdateConnectorErrorAction.Request doParseInstance(XContentParser parser) throws IOException {
return UpdateConnectorErrorAction.Request.fromXContent(parser, this.connectorId);
}
@Override
protected UpdateConnectorErrorAction.Request mutateInstanceForVersion(
UpdateConnectorErrorAction.Request instance,
TransportVersion version
) {
return instance;
}
}
|
UpdateConnectorErrorActionRequestBWCSerializingTests
|
java
|
netty__netty
|
codec-compression/src/main/java/io/netty/handler/codec/compression/ZlibEncoder.java
|
{
"start": 930,
"end": 1719
}
|
class ____ extends MessageToByteEncoder<ByteBuf> {
protected ZlibEncoder() {
super(ByteBuf.class, false);
}
/**
* Returns {@code true} if and only if the end of the compressed stream
* has been reached.
*/
public abstract boolean isClosed();
/**
* Close this {@link ZlibEncoder} and so finish the encoding.
*
* The returned {@link ChannelFuture} will be notified once the
* operation completes.
*/
public abstract ChannelFuture close();
/**
* Close this {@link ZlibEncoder} and so finish the encoding.
* The given {@link ChannelFuture} will be notified once the operation
* completes and will also be returned.
*/
public abstract ChannelFuture close(ChannelPromise promise);
}
|
ZlibEncoder
|
java
|
mybatis__mybatis-3
|
src/main/java/org/apache/ibatis/scripting/xmltags/OgnlCache.java
|
{
"start": 1072,
"end": 2087
}
|
class ____ {
private static final OgnlMemberAccess MEMBER_ACCESS = new OgnlMemberAccess();
private static final OgnlClassResolver CLASS_RESOLVER = new OgnlClassResolver();
private static final Map<String, Object> expressionCache = new ConcurrentHashMap<>();
private OgnlCache() {
// Prevent Instantiation of Static Class
}
public static Object getValue(String expression, Object root) {
try {
OgnlContext context = Ognl.createDefaultContext(root, MEMBER_ACCESS, CLASS_RESOLVER, null);
return Ognl.getValue(parseExpression(expression), context, root);
} catch (OgnlException e) {
throw new BuilderException("Error evaluating expression '" + expression + "'. Cause: " + e, e);
}
}
private static Object parseExpression(String expression) throws OgnlException {
Object node = expressionCache.get(expression);
if (node == null) {
node = Ognl.parseExpression(expression);
expressionCache.put(expression, node);
}
return node;
}
}
|
OgnlCache
|
java
|
apache__logging-log4j2
|
log4j-core-test/src/main/java/org/apache/logging/log4j/core/test/util/Profiler.java
|
{
"start": 1102,
"end": 3137
}
|
class ____ {
private static final Logger LOGGER = StatusLogger.getLogger();
private static Object profiler;
private static Class<?> profilingModes;
private static Class<?> controllerClazz;
static {
try {
controllerClazz = LoaderUtil.loadClass("com.yourkit.api.Controller");
profilingModes = LoaderUtil.loadClass("com.yourkit.api.ProfilingModes");
try {
profiler = controllerClazz.getConstructor().newInstance();
} catch (final Exception e) {
LOGGER.error("Profiler was active, but failed.", e);
}
} catch (final Exception ignored) {
// Ignore
}
}
private Profiler() {}
public static boolean isActive() {
return profiler != null;
}
private static long cpuSampling() throws NoSuchFieldException, IllegalAccessException {
return profilingModes.getDeclaredField("CPU_SAMPLING").getLong(profilingModes);
}
private static long snapshotWithoutHeap() throws NoSuchFieldException, IllegalAccessException {
return profilingModes.getDeclaredField("SNAPSHOT_WITHOUT_HEAP").getLong(profilingModes);
}
public static void start() {
if (profiler != null) {
try {
controllerClazz
.getMethod("startCPUProfiling", long.class, String.class)
.invoke(profiler, cpuSampling(), Strings.EMPTY);
} catch (final Exception e) {
LOGGER.error("Profiler was active, but failed.", e);
}
}
}
public static void stop() {
if (profiler != null) {
try {
controllerClazz.getMethod("captureSnapshot", long.class).invoke(profiler, snapshotWithoutHeap());
controllerClazz.getMethod("stopCPUProfiling").invoke(profiler);
} catch (final Exception e) {
LOGGER.error("Profiler was active, but failed.", e);
}
}
}
}
|
Profiler
|
java
|
apache__rocketmq
|
auth/src/main/java/org/apache/rocketmq/auth/migration/v1/PlainPermissionManager.java
|
{
"start": 1393,
"end": 6147
}
|
class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.COMMON_LOGGER_NAME);
private String fileHome = MixAll.ROCKETMQ_HOME_DIR;
private String defaultAclDir;
private String defaultAclFile;
private List<String> fileList = new ArrayList<>();
public PlainPermissionManager() {
this.defaultAclDir = MixAll.dealFilePath(fileHome + File.separator + "conf" + File.separator + "acl");
this.defaultAclFile = MixAll.dealFilePath(fileHome + File.separator + System.getProperty("rocketmq.acl.plain.file", "conf" + File.separator + "plain_acl.yml"));
load();
}
public List<String> getAllAclFiles(String path) {
if (!new File(path).exists()) {
log.info("The default acl dir {} is not exist", path);
return new ArrayList<>();
}
List<String> allAclFileFullPath = new ArrayList<>();
File file = new File(path);
File[] files = file.listFiles();
for (int i = 0; files != null && i < files.length; i++) {
String fileName = files[i].getAbsolutePath();
File f = new File(fileName);
if (fileName.equals(fileHome + MixAll.ACL_CONF_TOOLS_FILE)) {
continue;
} else if (fileName.endsWith(".yml") || fileName.endsWith(".yaml")) {
allAclFileFullPath.add(fileName);
} else if (f.isDirectory()) {
allAclFileFullPath.addAll(getAllAclFiles(fileName));
}
}
return allAclFileFullPath;
}
public void load() {
if (fileHome == null || fileHome.isEmpty()) {
return;
}
assureAclConfigFilesExist();
fileList = getAllAclFiles(defaultAclDir);
if (new File(defaultAclFile).exists() && !fileList.contains(defaultAclFile)) {
fileList.add(defaultAclFile);
}
}
/**
* Currently GlobalWhiteAddress is defined in {@link #defaultAclFile}, so make sure it exists.
*/
private void assureAclConfigFilesExist() {
final Path defaultAclFilePath = Paths.get(this.defaultAclFile);
if (!Files.exists(defaultAclFilePath)) {
try {
Files.createFile(defaultAclFilePath);
} catch (FileAlreadyExistsException e) {
// Maybe created by other threads
} catch (IOException e) {
log.error("Error in creating " + this.defaultAclFile, e);
throw new AclException(e.getMessage());
}
}
}
public AclConfig getAllAclConfig() {
AclConfig aclConfig = new AclConfig();
List<PlainAccessConfig> configs = new ArrayList<>();
List<String> whiteAddrs = new ArrayList<>();
Set<String> accessKeySets = new HashSet<>();
for (String path : fileList) {
PlainAccessData plainAclConfData = AclUtils.getYamlDataObject(path, PlainAccessData.class);
if (plainAclConfData == null) {
continue;
}
List<String> globalWhiteAddrs = plainAclConfData.getGlobalWhiteRemoteAddresses();
if (globalWhiteAddrs != null && !globalWhiteAddrs.isEmpty()) {
whiteAddrs.addAll(globalWhiteAddrs);
}
List<PlainAccessConfig> plainAccessConfigs = plainAclConfData.getAccounts();
if (plainAccessConfigs != null && !plainAccessConfigs.isEmpty()) {
for (PlainAccessConfig accessConfig : plainAccessConfigs) {
if (!accessKeySets.contains(accessConfig.getAccessKey())) {
accessKeySets.add(accessConfig.getAccessKey());
PlainAccessConfig plainAccessConfig = new PlainAccessConfig();
plainAccessConfig.setGroupPerms(accessConfig.getGroupPerms());
plainAccessConfig.setDefaultTopicPerm(accessConfig.getDefaultTopicPerm());
plainAccessConfig.setDefaultGroupPerm(accessConfig.getDefaultGroupPerm());
plainAccessConfig.setAccessKey(accessConfig.getAccessKey());
plainAccessConfig.setSecretKey(accessConfig.getSecretKey());
plainAccessConfig.setAdmin(accessConfig.isAdmin());
plainAccessConfig.setTopicPerms(accessConfig.getTopicPerms());
plainAccessConfig.setWhiteRemoteAddress(accessConfig.getWhiteRemoteAddress());
configs.add(plainAccessConfig);
}
}
}
}
aclConfig.setPlainAccessConfigs(configs);
aclConfig.setGlobalWhiteAddrs(whiteAddrs);
return aclConfig;
}
}
|
PlainPermissionManager
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TimeoutInvocationFactoryTests.java
|
{
"start": 1990,
"end": 4409
}
|
class ____ {
@Spy
private final Store store = new NamespaceAwareStore(new NamespacedHierarchicalStore<>(null),
Namespace.create(TimeoutInvocationFactoryTests.class));
@Mock
private Invocation<String> invocation;
@Mock
private TimeoutDuration timeoutDuration;
private TimeoutInvocationFactory timeoutInvocationFactory;
private TimeoutInvocationParameters<String> parameters;
@BeforeEach
void setUp() {
parameters = new TimeoutInvocationParameters<>(invocation, timeoutDuration, () -> "description",
PreInterruptCallbackInvocation.NOOP);
timeoutInvocationFactory = new TimeoutInvocationFactory(store);
}
@SuppressWarnings("DataFlowIssue")
@Test
@DisplayName("throws exception when null store is provided on create")
void shouldThrowExceptionWhenInstantiatingWithNullStore() {
assertThatThrownBy(() -> new TimeoutInvocationFactory(null)) //
.hasMessage("store must not be null");
}
@SuppressWarnings("DataFlowIssue")
@Test
@DisplayName("throws exception when null timeout thread mode is provided on create")
void shouldThrowExceptionWhenNullTimeoutThreadModeIsProvidedWhenCreate() {
assertThatThrownBy(() -> timeoutInvocationFactory.create(null, parameters)) //
.hasMessage("thread mode must not be null");
}
@SuppressWarnings("DataFlowIssue")
@Test
@DisplayName("throws exception when null timeout invocation parameters is provided on create")
void shouldThrowExceptionWhenNullTimeoutInvocationParametersIsProvidedWhenCreate() {
assertThatThrownBy(() -> timeoutInvocationFactory.create(ThreadMode.SAME_THREAD, null)) //
.hasMessage("timeout invocation parameters must not be null");
}
@SuppressWarnings("resource")
@Test
@DisplayName("creates timeout invocation for SAME_THREAD thread mode")
void shouldCreateTimeoutInvocationForSameThreadTimeoutThreadMode() {
var invocation = timeoutInvocationFactory.create(ThreadMode.SAME_THREAD, parameters);
assertThat(invocation).isInstanceOf(SameThreadTimeoutInvocation.class);
verify(store).computeIfAbsent(SingleThreadExecutorResource.class);
}
@Test
@DisplayName("creates timeout invocation for SEPARATE_THREAD thread mode")
void shouldCreateTimeoutInvocationForSeparateThreadTimeoutThreadMode() {
var invocation = timeoutInvocationFactory.create(ThreadMode.SEPARATE_THREAD, parameters);
assertThat(invocation).isInstanceOf(SeparateThreadTimeoutInvocation.class);
}
}
|
TimeoutInvocationFactoryTests
|
java
|
processing__processing4
|
core/src/processing/opengl/Texture.java
|
{
"start": 42923,
"end": 43064
}
|
class ____ the parameters for a texture: target, internal format,
* minimization filter and magnification filter.
*/
static public
|
stores
|
java
|
quarkusio__quarkus
|
integration-tests/virtual-threads/virtual-threads-disabled/src/test/java/io/quarkus/virtual/disabled/RunOnVirtualThreadDisabledTest.java
|
{
"start": 372,
"end": 2617
}
|
class ____ {
@Test
void testGet() {
RestAssured.get().then()
.assertThat().statusCode(200)
.body(is("hello-1"));
RestAssured.get().then()
.assertThat().statusCode(200)
// Same value - request scoped bean
.body(is("hello-1"));
}
@Test
void testPost() {
var body1 = UUID.randomUUID().toString();
var body2 = UUID.randomUUID().toString();
RestAssured
.given().body(body1)
.post().then()
.assertThat().statusCode(200)
.body(is(body1 + "-1"));
RestAssured
.given().body(body2)
.post().then()
.assertThat().statusCode(200)
// Same value - request scoped bean
.body(is(body2 + "-1"));
}
@Test
void testNonBlocking() {
// Non Blocking
RestAssured.get("/non-blocking").then()
.assertThat().statusCode(200)
.body(is("ok"));
// Uni
RestAssured.get("/uni").then()
.assertThat().statusCode(200)
.body(is("ok"));
// Multi
RestAssured.get("/multi").then()
.assertThat().statusCode(200)
.body(is("ok"));
}
@Test
void testRegularBlocking() {
RestAssured.get("/blocking").then()
.assertThat().statusCode(200)
.body(is("hello-1"));
}
@Test
void testRunOnVirtualThreadOnClass() {
RestAssured.get("/class").then()
.assertThat().statusCode(200)
.body(is("hello-1"));
RestAssured.get("/class").then()
.assertThat().statusCode(200)
.body(is("hello-1"));
RestAssured.get("/class/uni").then()
.assertThat().statusCode(200)
.body(is("ok"));
RestAssured.get("/class/multi").then()
.assertThat().statusCode(200)
.body(is("ok"));
}
@Test
void testFilters() {
RestAssured.get("/filter").then()
.assertThat().statusCode(200);
}
}
|
RunOnVirtualThreadDisabledTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/argumentselectiondefects/NameInCommentHeuristicTest.java
|
{
"start": 3667,
"end": 4227
}
|
class ____ {
abstract void target(Object first);
void test(Object first) {
// BUG: Diagnostic contains: true
target(/* first= */ first);
}
}
""")
.doTest();
}
@Test
public void
nameInCommentHeuristic_returnsTrue_wherePreceedingCommentHasEqualsSpacesAndExtraText() {
CompilationTestHelper.newInstance(NameInCommentHeuristicChecker.class, getClass())
.addSourceLines(
"Test.java",
"""
abstract
|
Test
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-api/src/test/java/org/apache/dubbo/config/AbstractConfigTest.java
|
{
"start": 33740,
"end": 38945
}
|
class ____ extends AbstractConfig {
private Class interfaceClass;
private String filter;
private String listener;
private Map<String, String> parameters;
private String[] configFields;
public Class getInterface() {
return interfaceClass;
}
public void setInterface(Class interfaceName) {
this.interfaceClass = interfaceName;
}
public String getFilter() {
return filter;
}
public void setFilter(String filter) {
this.filter = filter;
}
public String getListener() {
return listener;
}
public void setListener(String listener) {
this.listener = listener;
}
public Map<String, String> getParameters() {
return parameters;
}
public void setParameters(Map<String, String> parameters) {
this.parameters = parameters;
}
public String[] getConfigFields() {
return configFields;
}
public void setConfigFields(String[] configFields) {
this.configFields = configFields;
}
}
@Test
void testMetaData() throws Exception {
// Expect empty metadata for new instance
// Check and set default value of field in checkDefault() method
List<Class<? extends AbstractConfig>> configClasses = Arrays.asList(
ApplicationConfig.class,
ConsumerConfig.class,
ProviderConfig.class,
ReferenceConfig.class,
ServiceConfig.class,
ProtocolConfig.class,
RegistryConfig.class,
ConfigCenterConfig.class,
MetadataReportConfig.class,
ModuleConfig.class,
SslConfig.class,
MetricsConfig.class,
MonitorConfig.class,
MethodConfig.class);
for (Class<? extends AbstractConfig> configClass : configClasses) {
AbstractConfig config = configClass.getDeclaredConstructor().newInstance();
Map<String, String> metaData = config.getMetaData();
Assertions.assertEquals(
0,
metaData.size(),
"Expect empty metadata for new instance but found: " + metaData + " of "
+ configClass.getSimpleName());
}
}
@Test
void testRefreshNested() {
try {
OuterConfig outerConfig = new OuterConfig();
Map<String, String> external = new HashMap<>();
external.put("dubbo.outer.a1", "1");
external.put("dubbo.outer.b.b1", "11");
external.put("dubbo.outer.b.b2", "12");
ApplicationModel.defaultModel().modelEnvironment().initialize();
ApplicationModel.defaultModel().modelEnvironment().setExternalConfigMap(external);
// refresh config
outerConfig.refresh();
Assertions.assertEquals(1, outerConfig.getA1());
Assertions.assertEquals(11, outerConfig.getB().getB1());
Assertions.assertEquals(12, outerConfig.getB().getB2());
} finally {
ApplicationModel.defaultModel().modelEnvironment().destroy();
}
}
@Test
void testRefreshNestedWithId() {
try {
System.setProperty("dubbo.outers.test.a1", "1");
System.setProperty("dubbo.outers.test.b.b1", "11");
System.setProperty("dubbo.outers.test.b.b2", "12");
ApplicationModel.defaultModel().modelEnvironment().initialize();
OuterConfig outerConfig = new OuterConfig("test");
outerConfig.refresh();
Assertions.assertEquals(1, outerConfig.getA1());
Assertions.assertEquals(11, outerConfig.getB().getB1());
Assertions.assertEquals(12, outerConfig.getB().getB2());
} finally {
ApplicationModel.defaultModel().modelEnvironment().destroy();
System.clearProperty("dubbo.outers.test.a1");
System.clearProperty("dubbo.outers.test.b.b1");
System.clearProperty("dubbo.outers.test.b.b2");
}
}
@Test
void testRefreshNestedBySystemProperties() {
try {
Properties p = System.getProperties();
p.put("dubbo.outer.a1", "1");
p.put("dubbo.outer.b.b1", "11");
p.put("dubbo.outer.b.b2", "12");
ApplicationModel.defaultModel().modelEnvironment().initialize();
OuterConfig outerConfig = new OuterConfig();
outerConfig.refresh();
Assertions.assertEquals(1, outerConfig.getA1());
Assertions.assertEquals(11, outerConfig.getB().getB1());
Assertions.assertEquals(12, outerConfig.getB().getB2());
} finally {
ApplicationModel.defaultModel().modelEnvironment().destroy();
System.clearProperty("dubbo.outer.a1");
System.clearProperty("dubbo.outer.b.b1");
System.clearProperty("dubbo.outer.b.b2");
}
}
private static
|
AnnotationConfig
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/interop/ImmutablesTypeSerializationTest.java
|
{
"start": 5996,
"end": 7009
}
|
class ____<T>
implements ImmutablesTypeSerializationTest.Key<T> {
private final T id;
ImmutableKey(T id) {
this.id = id;
}
@JsonProperty("id")
@Override
public T getId() {
return id;
}
@Override
public boolean equals(Object another) {
if (this == another) return true;
return another instanceof ImmutableKey<?>
&& equalTo((ImmutableKey<?>) another);
}
private boolean equalTo(ImmutableKey<?> another) {
return id.equals(another.id);
}
@Override
public int hashCode() {
int h = 5381;
h += (h << 5) + id.hashCode();
return h;
}
@Override
public String toString() {
return "Key{id=" + id + "}";
}
@JsonDeserialize
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.NONE)
static final
|
ImmutableKey
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/net/impl/SelfSignedCertificateImpl.java
|
{
"start": 644,
"end": 1805
}
|
class ____ implements SelfSignedCertificate {
private final io.netty.handler.ssl.util.SelfSignedCertificate certificate;
public SelfSignedCertificateImpl() {
try {
certificate = new io.netty.handler.ssl.util.SelfSignedCertificate();
} catch (CertificateException e) {
throw new VertxException(e);
}
}
public SelfSignedCertificateImpl(String fqdn) {
try {
certificate = new io.netty.handler.ssl.util.SelfSignedCertificate(fqdn);
} catch (CertificateException e) {
throw new VertxException(e);
}
}
@Override
public PemKeyCertOptions keyCertOptions() {
return new PemKeyCertOptions()
.setKeyPath(privateKeyPath())
.setCertPath(certificatePath());
}
@Override
public PemTrustOptions trustOptions() {
return new PemTrustOptions().addCertPath(certificatePath());
}
@Override
public String privateKeyPath() {
return certificate.privateKey().getAbsolutePath();
}
@Override
public String certificatePath() {
return certificate.certificate().getAbsolutePath();
}
@Override
public void delete() {
certificate.delete();
}
}
|
SelfSignedCertificateImpl
|
java
|
elastic__elasticsearch
|
x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene70/fst/FST.java
|
{
"start": 12279,
"end": 61253
}
|
class ____ {
/** See {@link BitTableUtil#isBitSet(int, BytesReader)}. */
static boolean isBitSet(int bitIndex, Arc<?> arc, BytesReader in) throws IOException {
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
in.setPosition(arc.bitTableStart);
return BitTableUtil.isBitSet(bitIndex, in);
}
/**
* See {@link BitTableUtil#countBits(int, BytesReader)}. The count of bit set is the
* number of arcs of a direct addressing node.
*/
static int countBits(Arc<?> arc, BytesReader in) throws IOException {
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
in.setPosition(arc.bitTableStart);
return BitTableUtil.countBits(getNumPresenceBytes(arc.numArcs()), in);
}
/** See {@link BitTableUtil#countBitsUpTo(int, BytesReader)}. */
static int countBitsUpTo(int bitIndex, Arc<?> arc, BytesReader in) throws IOException {
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
in.setPosition(arc.bitTableStart);
return BitTableUtil.countBitsUpTo(bitIndex, in);
}
/** See {@link BitTableUtil#nextBitSet(int, int, BytesReader)}. */
static int nextBitSet(int bitIndex, Arc<?> arc, BytesReader in) throws IOException {
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
in.setPosition(arc.bitTableStart);
return BitTableUtil.nextBitSet(bitIndex, getNumPresenceBytes(arc.numArcs()), in);
}
/** See {@link BitTableUtil#previousBitSet(int, BytesReader)}. */
static int previousBitSet(int bitIndex, Arc<?> arc, BytesReader in) throws IOException {
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
in.setPosition(arc.bitTableStart);
return BitTableUtil.previousBitSet(bitIndex, in);
}
/** Asserts the bit-table of the provided {@link Arc} is valid. */
static boolean assertIsValid(Arc<?> arc, BytesReader in) throws IOException {
assert arc.bytesPerArc() > 0;
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
// First bit must be set.
assert isBitSet(0, arc, in);
// Last bit must be set.
assert isBitSet(arc.numArcs() - 1, arc, in);
// No bit set after the last arc.
assert nextBitSet(arc.numArcs() - 1, arc, in) == -1;
return true;
}
}
}
private static boolean flag(int flags, int bit) {
return (flags & bit) != 0;
}
// make a new empty FST, for building; Builder invokes this
FST(INPUT_TYPE inputType, Outputs<T> outputs, int bytesPageBits) {
this.inputType = inputType;
this.outputs = outputs;
fstStore = null;
bytes = new BytesStore(bytesPageBits);
// pad: ensure no node gets address 0 which is reserved to mean
// the stop state w/ no arcs
bytes.writeByte((byte) 0);
emptyOutput = null;
this.version = VERSION_CURRENT;
}
private static final int DEFAULT_MAX_BLOCK_BITS = Constants.JRE_IS_64BIT ? 30 : 28;
/** Load a previously saved FST. */
public FST(DataInput metaIn, DataInput in, Outputs<T> outputs) throws IOException {
this(metaIn, in, outputs, new OnHeapFSTStore(DEFAULT_MAX_BLOCK_BITS));
}
/**
* Load a previously saved FST; maxBlockBits allows you to control the size of the byte[] pages
* used to hold the FST bytes.
*/
public FST(DataInput metaIn, DataInput in, Outputs<T> outputs, FSTStore fstStore) throws IOException {
bytes = null;
this.fstStore = fstStore;
this.outputs = outputs;
// NOTE: only reads formats VERSION_START up to VERSION_CURRENT; we don't have
// back-compat promise for FSTs (they are experimental), but we are sometimes able to offer it
this.version = CodecUtil.checkHeader(metaIn, FILE_FORMAT_NAME, VERSION_START, VERSION_CURRENT);
if (version < VERSION_PACKED_REMOVED) {
if (in.readByte() == 1) {
throw new CorruptIndexException("Cannot read packed FSTs anymore", in);
}
}
if (metaIn.readByte() == 1) {
// accepts empty string
// 1 KB blocks:
BytesStore emptyBytes = new BytesStore(10);
int numBytes = metaIn.readVInt();
emptyBytes.copyBytes(metaIn, numBytes);
// De-serialize empty-string output:
BytesReader reader = emptyBytes.getReverseReader();
// NoOutputs uses 0 bytes when writing its output,
// so we have to check here else BytesStore gets
// angry:
if (numBytes > 0) {
reader.setPosition(numBytes - 1);
}
emptyOutput = outputs.readFinalOutput(reader);
} else {
emptyOutput = null;
}
final byte t = metaIn.readByte();
switch (t) {
case 0:
inputType = INPUT_TYPE.BYTE1;
break;
case 1:
inputType = INPUT_TYPE.BYTE2;
break;
case 2:
inputType = INPUT_TYPE.BYTE4;
break;
default:
throw new CorruptIndexException("invalid input type " + t, in);
}
startNode = metaIn.readVLong();
if (version < VERSION_NO_NODE_ARC_COUNTS) {
metaIn.readVLong();
metaIn.readVLong();
metaIn.readVLong();
}
long numBytes = metaIn.readVLong();
this.fstStore.init(in, numBytes);
}
@Override
public long ramBytesUsed() {
long size = BASE_RAM_BYTES_USED;
if (this.fstStore != null) {
size += this.fstStore.ramBytesUsed();
} else {
size += bytes.ramBytesUsed();
}
return size;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(input=" + inputType + ",output=" + outputs;
}
void finish(long newStartNode) throws IOException {
assert newStartNode <= bytes.getPosition();
if (startNode != -1) {
throw new IllegalStateException("already finished");
}
if (newStartNode == FINAL_END_NODE && emptyOutput != null) {
newStartNode = 0;
}
startNode = newStartNode;
bytes.finish();
}
public T getEmptyOutput() {
return emptyOutput;
}
void setEmptyOutput(T v) {
if (emptyOutput != null) {
emptyOutput = outputs.merge(emptyOutput, v);
} else {
emptyOutput = v;
}
}
/**
 * Writes this FST: metadata (codec header, empty-string output, input type, start node,
 * on-heap byte count) goes to {@code metaOut}; the node/arc bytes go to {@code out}.
 *
 * @throws IllegalStateException if {@link #finish} has not been called yet
 */
public void save(DataOutput metaOut, DataOutput out) throws IOException {
    if (startNode == -1) {
        throw new IllegalStateException("call finish first");
    }
    CodecUtil.writeHeader(metaOut, FILE_FORMAT_NAME, VERSION_CURRENT);
    // TODO: really we should encode this as an arc, arriving
    // to the root node, instead of special casing here:
    if (emptyOutput != null) {
        // Accepts empty string
        metaOut.writeByte((byte) 1);

        // Serialize empty-string output:
        ByteBuffersDataOutput ros = new ByteBuffersDataOutput();
        outputs.writeFinalOutput(emptyOutput, ros);
        byte[] emptyOutputBytes = ros.toArrayCopy();
        int emptyLen = emptyOutputBytes.length;

        // Reverse in place: FST bytes are read backwards at load time (see the
        // matching reverse reader in the constructor).
        final int stopAt = emptyLen / 2;
        int upto = 0;
        while (upto < stopAt) {
            final byte b = emptyOutputBytes[upto];
            emptyOutputBytes[upto] = emptyOutputBytes[emptyLen - upto - 1];
            emptyOutputBytes[emptyLen - upto - 1] = b;
            upto++;
        }
        metaOut.writeVInt(emptyLen);
        metaOut.writeBytes(emptyOutputBytes, 0, emptyLen);
    } else {
        // Does not accept the empty string.
        metaOut.writeByte((byte) 0);
    }
    // Input type encoded as one byte: 0=BYTE1, 1=BYTE2, 2=BYTE4.
    final byte t;
    if (inputType == FST.INPUT_TYPE.BYTE1) {
        t = 0;
    } else if (inputType == FST.INPUT_TYPE.BYTE2) {
        t = 1;
    } else {
        t = 2;
    }
    metaOut.writeByte(t);
    metaOut.writeVLong(startNode);
    if (bytes != null) {
        // On-heap FST built by FSTCompiler: record length, then copy the byte store.
        long numBytes = bytes.getPosition();
        metaOut.writeVLong(numBytes);
        bytes.writeTo(out);
    } else {
        // Off-heap FST: delegate writing of the body to the store.
        assert fstStore != null;
        fstStore.writeTo(out);
    }
}
/** Writes an automaton to a file. */
public void save(final Path path) throws IOException {
    try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(path))) {
        // Single-file form: metadata and FST body share one output.
        final DataOutput dataOut = new OutputStreamDataOutput(os);
        save(dataOut, dataOut);
    }
}
/** Reads an automaton from a file. */
public static <T> FST<T> read(Path path, Outputs<T> outputs) throws IOException {
    try (InputStream is = Files.newInputStream(path)) {
        // Single-file form: metadata and FST body share one input.
        final DataInput dataIn = new InputStreamDataInput(new BufferedInputStream(is));
        return new FST<>(dataIn, dataIn, outputs);
    }
}
/** Writes one label using the encoding selected by this FST's {@code inputType}. */
private void writeLabel(DataOutput out, int v) throws IOException {
    assert v >= 0 : "v=" + v;
    switch (inputType) {
        case BYTE1:
            assert v <= 255 : "v=" + v;
            out.writeByte((byte) v);
            break;
        case BYTE2:
            assert v <= 65535 : "v=" + v;
            out.writeShort((short) v);
            break;
        default:
            // BYTE4: variable-length int.
            out.writeVInt(v);
            break;
    }
}
/** Reads one BYTE1/2/4 label from the provided {@link DataInput}. */
public int readLabel(DataInput in) throws IOException {
    switch (inputType) {
        case BYTE1:
            // Unsigned byte.
            return in.readByte() & 0xFF;
        case BYTE2:
            // Unsigned short; versions before the little-endian switch stored it byte-swapped.
            final short raw = in.readShort();
            return (version < VERSION_LITTLE_ENDIAN ? Short.reverseBytes(raw) : raw) & 0xFFFF;
        default:
            return in.readVInt();
    }
}
/** returns true if the node at this address has any outgoing arcs */
public static <T> boolean targetHasArcs(Arc<T> arc) {
    // Non-positive targets are the sentinel end nodes (final / non-final leaf),
    // which by construction have no outgoing arcs.
    return arc.target() > 0;
}
// serializes new node by appending its bytes to the end
// of the current byte[]
/**
 * Serializes {@code nodeIn} into the compiler's byte store and returns its address.
 * Arcs are written forward here and the whole node is byte-reversed at the end, because
 * FST bytes are consumed by a reverse reader at search time. Nodes with many arcs may be
 * re-encoded with fixed-length arcs (binary search or direct addressing) in a second pass.
 */
long addNode(FSTCompiler<T> fstCompiler, FSTCompiler.UnCompiledNode<T> nodeIn) throws IOException {
    T NO_OUTPUT = outputs.getNoOutput();

    // System.out.println("FST.addNode pos=" + bytes.getPosition() + " numArcs=" + nodeIn.numArcs);
    if (nodeIn.numArcs == 0) {
        // Leaf node: nothing is written; a sentinel address encodes final vs non-final.
        if (nodeIn.isFinal) {
            return FINAL_END_NODE;
        } else {
            return NON_FINAL_END_NODE;
        }
    }
    final long startAddress = fstCompiler.bytes.getPosition();
    // System.out.println("  startAddr=" + startAddress);

    final boolean doFixedLengthArcs = shouldExpandNodeWithFixedLengthArcs(fstCompiler, nodeIn);
    if (doFixedLengthArcs) {
        // System.out.println("  fixed length arcs");
        // Grow the per-arc bookkeeping arrays if this node has more arcs than any seen so far.
        if (fstCompiler.numBytesPerArc.length < nodeIn.numArcs) {
            fstCompiler.numBytesPerArc = new int[ArrayUtil.oversize(nodeIn.numArcs, Integer.BYTES)];
            fstCompiler.numLabelBytesPerArc = new int[fstCompiler.numBytesPerArc.length];
        }
    }

    fstCompiler.arcCount += nodeIn.numArcs;

    final int lastArc = nodeIn.numArcs - 1;

    long lastArcStart = fstCompiler.bytes.getPosition();
    int maxBytesPerArc = 0;
    int maxBytesPerArcWithoutLabel = 0;
    // First pass: write each arc variable-length, recording per-arc sizes for a
    // possible fixed-length re-encoding below.
    for (int arcIdx = 0; arcIdx < nodeIn.numArcs; arcIdx++) {
        final FSTCompiler.Arc<T> arc = nodeIn.arcs[arcIdx];
        final FSTCompiler.CompiledNode target = (FSTCompiler.CompiledNode) arc.target;
        int flags = 0;
        // System.out.println("  arc " + arcIdx + " label=" + arc.label + " -> target=" +
        // target.node);

        if (arcIdx == lastArc) {
            flags += BIT_LAST_ARC;
        }

        if (fstCompiler.lastFrozenNode == target.node && doFixedLengthArcs == false) {
            // TODO: for better perf (but more RAM used) we
            // could avoid this except when arc is "near" the
            // last arc:
            flags += BIT_TARGET_NEXT;
        }

        if (arc.isFinal) {
            flags += BIT_FINAL_ARC;
            if (arc.nextFinalOutput != NO_OUTPUT) {
                flags += BIT_ARC_HAS_FINAL_OUTPUT;
            }
        } else {
            assert arc.nextFinalOutput == NO_OUTPUT;
        }

        boolean targetHasArcs = target.node > 0;

        if (targetHasArcs == false) {
            flags += BIT_STOP_NODE;
        }

        if (arc.output != NO_OUTPUT) {
            flags += BIT_ARC_HAS_OUTPUT;
        }

        fstCompiler.bytes.writeByte((byte) flags);
        long labelStart = fstCompiler.bytes.getPosition();
        writeLabel(fstCompiler.bytes, arc.label);
        int numLabelBytes = (int) (fstCompiler.bytes.getPosition() - labelStart);

        // System.out.println("  write arc: label=" + (char) arc.label + " flags=" + flags + "
        // target=" + target.node + " pos=" + bytes.getPosition() + " output=" +
        // outputs.outputToString(arc.output));

        if (arc.output != NO_OUTPUT) {
            outputs.write(arc.output, fstCompiler.bytes);
            // System.out.println("    write output");
        }

        if (arc.nextFinalOutput != NO_OUTPUT) {
            // System.out.println("    write final output");
            outputs.writeFinalOutput(arc.nextFinalOutput, fstCompiler.bytes);
        }

        // Target address is only stored explicitly when it is not implied by
        // BIT_TARGET_NEXT and the target is a real (non-sentinel) node.
        if (targetHasArcs && (flags & BIT_TARGET_NEXT) == 0) {
            assert target.node > 0;
            // System.out.println("    write target");
            fstCompiler.bytes.writeVLong(target.node);
        }

        // just write the arcs "like normal" on first pass, but record how many bytes each one took
        // and max byte size:
        if (doFixedLengthArcs) {
            int numArcBytes = (int) (fstCompiler.bytes.getPosition() - lastArcStart);
            fstCompiler.numBytesPerArc[arcIdx] = numArcBytes;
            fstCompiler.numLabelBytesPerArc[arcIdx] = numLabelBytes;
            lastArcStart = fstCompiler.bytes.getPosition();
            maxBytesPerArc = Math.max(maxBytesPerArc, numArcBytes);
            maxBytesPerArcWithoutLabel = Math.max(maxBytesPerArcWithoutLabel, numArcBytes - numLabelBytes);
            // System.out.println("    arcBytes=" + numArcBytes + " labelBytes=" + numLabelBytes);
        }
    }

    // TODO: try to avoid wasteful cases: disable doFixedLengthArcs in that case
    /*
     *
     * LUCENE-4682: what is a fair heuristic here?
     * It could involve some of these:
     * 1. how "busy" the node is: nodeIn.inputCount relative to frontier[0].inputCount?
     * 2. how much binSearch saves over scan: nodeIn.numArcs
     * 3. waste: numBytes vs numBytesExpanded
     *
     * the one below just looks at #3
    if (doFixedLengthArcs) {
      // rough heuristic: make this 1.25 "waste factor" a parameter to the phd ctor????
      int numBytes = lastArcStart - startAddress;
      int numBytesExpanded = maxBytesPerArc * nodeIn.numArcs;
      if (numBytesExpanded > numBytes*1.25) {
        doFixedLengthArcs = false;
      }
    }
    */

    if (doFixedLengthArcs) {
        assert maxBytesPerArc > 0;
        // 2nd pass just "expands" all arcs to take up a fixed byte size

        int labelRange = nodeIn.arcs[nodeIn.numArcs - 1].label - nodeIn.arcs[0].label + 1;
        assert labelRange > 0;
        if (shouldExpandNodeWithDirectAddressing(fstCompiler, nodeIn, maxBytesPerArc, maxBytesPerArcWithoutLabel, labelRange)) {
            writeNodeForDirectAddressing(fstCompiler, nodeIn, startAddress, maxBytesPerArcWithoutLabel, labelRange);
            fstCompiler.directAddressingNodeCount++;
        } else {
            writeNodeForBinarySearch(fstCompiler, nodeIn, startAddress, maxBytesPerArc);
            fstCompiler.binarySearchNodeCount++;
        }
    }

    // Reverse this node's bytes in place: readers consume the FST backwards.
    final long thisNodeAddress = fstCompiler.bytes.getPosition() - 1;
    fstCompiler.bytes.reverse(startAddress, thisNodeAddress);
    fstCompiler.nodeCount++;
    return thisNodeAddress;
}
/**
 * Returns whether the given node should be expanded with fixed length arcs. Nodes will be
 * expanded depending on their depth (distance from the root node) and their number of arcs.
 *
 * <p>Nodes with fixed length arcs use more space, because they encode all arcs with a fixed
 * number of bytes, but they allow either binary search or direct addressing on the arcs (instead
 * of linear scan) on lookup by arc label.
 */
private boolean shouldExpandNodeWithFixedLengthArcs(FSTCompiler<T> fstCompiler, FSTCompiler.UnCompiledNode<T> node) {
    if (fstCompiler.allowFixedLengthArcs == false) {
        return false;
    }
    // Very busy nodes are always expanded; shallow nodes (close to the root, hit on
    // nearly every lookup) are expanded at a lower arc-count threshold.
    return node.numArcs >= FIXED_LENGTH_ARC_DEEP_NUM_ARCS
        || (node.depth <= FIXED_LENGTH_ARC_SHALLOW_DEPTH && node.numArcs >= FIXED_LENGTH_ARC_SHALLOW_NUM_ARCS);
}
/**
 * Returns whether the given node should be expanded with direct addressing instead of binary
 * search.
 *
 * <p>Prefer direct addressing for performance if it does not oversize binary search byte size too
 * much, so that the arcs can be directly addressed by label.
 *
 * @see FSTCompiler#getDirectAddressingMaxOversizingFactor()
 */
private boolean shouldExpandNodeWithDirectAddressing(
    FSTCompiler<T> fstCompiler,
    FSTCompiler.UnCompiledNode<T> nodeIn,
    int numBytesPerArc,
    int maxBytesPerArcWithoutLabel,
    int labelRange
) {
    // Anticipate precisely the size of the encodings.
    int sizeForBinarySearch = numBytesPerArc * nodeIn.numArcs;
    // Direct addressing stores the presence bit-table, the first label, and one
    // fixed-size (label-less) slot per present arc.
    int sizeForDirectAddressing = getNumPresenceBytes(labelRange) + fstCompiler.numLabelBytesPerArc[0] + maxBytesPerArcWithoutLabel
        * nodeIn.numArcs;

    // Determine the allowed oversize compared to binary search.
    // This is defined by a parameter of FST Builder (default 1: no oversize).
    int allowedOversize = (int) (sizeForBinarySearch * fstCompiler.getDirectAddressingMaxOversizingFactor());
    int expansionCost = sizeForDirectAddressing - allowedOversize;

    // Select direct addressing if either:
    // - Direct addressing size is smaller than binary search.
    // In this case, increment the credit by the reduced size (to use it later).
    // - Direct addressing size is larger than binary search, but the positive credit allows the
    // oversizing.
    // In this case, decrement the credit by the oversize.
    // In addition, do not try to oversize to a clearly too large node size
    // (this is the DIRECT_ADDRESSING_MAX_OVERSIZE_WITH_CREDIT_FACTOR parameter).
    if (expansionCost <= 0
        || (fstCompiler.directAddressingExpansionCredit >= expansionCost
            && sizeForDirectAddressing <= allowedOversize * DIRECT_ADDRESSING_MAX_OVERSIZE_WITH_CREDIT_FACTOR)) {
        // Note: a negative expansionCost increases the credit here (subtracting a negative).
        fstCompiler.directAddressingExpansionCredit -= expansionCost;
        return true;
    }
    return false;
}
/**
 * Re-encodes a node (already written variable-length by {@code addNode}) so every arc occupies
 * exactly {@code maxBytesPerArc} bytes, enabling binary search by label. Arcs are moved in
 * place, backwards, then the fixed-length-arcs header is written over the start of the node.
 */
private void writeNodeForBinarySearch(
    FSTCompiler<T> fstCompiler,
    FSTCompiler.UnCompiledNode<T> nodeIn,
    long startAddress,
    int maxBytesPerArc
) {
    // Build the header in a buffer.
    // It is a false/special arc which is in fact a node header with node flags followed by node
    // metadata.
    fstCompiler.fixedLengthArcsBuffer.resetPosition()
        .writeByte(ARCS_FOR_BINARY_SEARCH)
        .writeVInt(nodeIn.numArcs)
        .writeVInt(maxBytesPerArc);
    int headerLen = fstCompiler.fixedLengthArcsBuffer.getPosition();

    // Expand the arcs in place, backwards.
    long srcPos = fstCompiler.bytes.getPosition();
    long destPos = startAddress + headerLen + nodeIn.numArcs * maxBytesPerArc;
    assert destPos >= srcPos;
    if (destPos > srcPos) {
        // Reserve room, then move each arc from its packed position to its fixed slot,
        // starting with the last arc so sources are never overwritten before being read.
        fstCompiler.bytes.skipBytes((int) (destPos - srcPos));
        for (int arcIdx = nodeIn.numArcs - 1; arcIdx >= 0; arcIdx--) {
            destPos -= maxBytesPerArc;
            int arcLen = fstCompiler.numBytesPerArc[arcIdx];
            srcPos -= arcLen;
            if (srcPos != destPos) {
                assert destPos > srcPos
                    : "destPos="
                    + destPos
                    + " srcPos="
                    + srcPos
                    + " arcIdx="
                    + arcIdx
                    + " maxBytesPerArc="
                    + maxBytesPerArc
                    + " arcLen="
                    + arcLen
                    + " nodeIn.numArcs="
                    + nodeIn.numArcs;
                fstCompiler.bytes.copyBytes(srcPos, destPos, arcLen);
            }
        }
    }

    // Write the header.
    fstCompiler.bytes.writeBytes(startAddress, fstCompiler.fixedLengthArcsBuffer.getBytes(), 0, headerLen);
}
/**
 * Re-encodes a node for direct addressing: per-arc labels are dropped (except the first), a
 * presence bit-table maps label offsets to arc slots, and each arc occupies exactly
 * {@code maxBytesPerArcWithoutLabel} bytes.
 */
private void writeNodeForDirectAddressing(
    FSTCompiler<T> fstCompiler,
    FSTCompiler.UnCompiledNode<T> nodeIn,
    long startAddress,
    int maxBytesPerArcWithoutLabel,
    int labelRange
) {
    // Expand the arcs backwards in a buffer because we remove the labels.
    // So the obtained arcs might occupy less space. This is the reason why this
    // whole method is more complex.
    // Drop the label bytes since we can infer the label based on the arc index,
    // the presence bits, and the first label. Keep the first label.
    int headerMaxLen = 11;
    int numPresenceBytes = getNumPresenceBytes(labelRange);
    long srcPos = fstCompiler.bytes.getPosition();
    int totalArcBytes = fstCompiler.numLabelBytesPerArc[0] + nodeIn.numArcs * maxBytesPerArcWithoutLabel;
    int bufferOffset = headerMaxLen + numPresenceBytes + totalArcBytes;
    byte[] buffer = fstCompiler.fixedLengthArcsBuffer.ensureCapacity(bufferOffset).getBytes();

    // Copy the arcs to the buffer, dropping all labels except first one.
    for (int arcIdx = nodeIn.numArcs - 1; arcIdx >= 0; arcIdx--) {
        bufferOffset -= maxBytesPerArcWithoutLabel;
        int srcArcLen = fstCompiler.numBytesPerArc[arcIdx];
        srcPos -= srcArcLen;
        int labelLen = fstCompiler.numLabelBytesPerArc[arcIdx];
        // Copy the flags.
        fstCompiler.bytes.copyBytes(srcPos, buffer, bufferOffset, 1);
        // Skip the label, copy the remaining.
        int remainingArcLen = srcArcLen - 1 - labelLen;
        if (remainingArcLen != 0) {
            fstCompiler.bytes.copyBytes(srcPos + 1 + labelLen, buffer, bufferOffset + 1, remainingArcLen);
        }
        if (arcIdx == 0) {
            // Copy the label of the first arc only.
            bufferOffset -= labelLen;
            fstCompiler.bytes.copyBytes(srcPos + 1, buffer, bufferOffset, labelLen);
        }
    }
    assert bufferOffset == headerMaxLen + numPresenceBytes;

    // Build the header in the buffer.
    // It is a false/special arc which is in fact a node header with node flags followed by node
    // metadata.
    fstCompiler.fixedLengthArcsBuffer.resetPosition()
        .writeByte(ARCS_FOR_DIRECT_ADDRESSING)
        .writeVInt(labelRange) // labelRange instead of numArcs.
        .writeVInt(maxBytesPerArcWithoutLabel); // maxBytesPerArcWithoutLabel instead of maxBytesPerArc.
    int headerLen = fstCompiler.fixedLengthArcsBuffer.getPosition();

    // Prepare the builder byte store. Enlarge or truncate if needed.
    long nodeEnd = startAddress + headerLen + numPresenceBytes + totalArcBytes;
    long currentPosition = fstCompiler.bytes.getPosition();
    if (nodeEnd >= currentPosition) {
        fstCompiler.bytes.skipBytes((int) (nodeEnd - currentPosition));
    } else {
        // Dropping labels may have shrunk the node below its packed size.
        fstCompiler.bytes.truncate(nodeEnd);
    }
    assert fstCompiler.bytes.getPosition() == nodeEnd;

    // Write the header.
    long writeOffset = startAddress;
    fstCompiler.bytes.writeBytes(writeOffset, fstCompiler.fixedLengthArcsBuffer.getBytes(), 0, headerLen);
    writeOffset += headerLen;

    // Write the presence bits
    writePresenceBits(fstCompiler, nodeIn, writeOffset, numPresenceBytes);
    writeOffset += numPresenceBytes;

    // Write the first label and the arcs.
    fstCompiler.bytes.writeBytes(writeOffset, fstCompiler.fixedLengthArcsBuffer.getBytes(), bufferOffset, totalArcBytes);
}
/**
 * Writes the presence bit-table of a direct-addressing node at {@code dest}: one bit per label
 * in the node's label range, set iff an arc with that label exists.
 */
private void writePresenceBits(FSTCompiler<T> fstCompiler, FSTCompiler.UnCompiledNode<T> nodeIn, long dest, int numPresenceBytes) {
    long bytePos = dest;
    byte presenceBits = 1; // The first arc is always present.
    int presenceIndex = 0;
    int previousLabel = nodeIn.arcs[0].label;
    for (int arcIdx = 1; arcIdx < nodeIn.numArcs; arcIdx++) {
        int label = nodeIn.arcs[arcIdx].label;
        assert label > previousLabel;
        // Advance by the gap between consecutive labels; absent labels stay 0.
        presenceIndex += label - previousLabel;
        while (presenceIndex >= Byte.SIZE) {
            // Flush a completed byte of the bit-table.
            fstCompiler.bytes.writeByte(bytePos++, presenceBits);
            presenceBits = 0;
            presenceIndex -= Byte.SIZE;
        }
        // Set the bit at presenceIndex to flag that the corresponding arc is present.
        presenceBits |= (byte) (1 << presenceIndex);
        previousLabel = label;
    }
    assert presenceIndex == (nodeIn.arcs[nodeIn.numArcs - 1].label - nodeIn.arcs[0].label) % 8;
    assert presenceBits != 0; // The last byte is not 0.
    assert (presenceBits & (1 << presenceIndex)) != 0; // The last arc is always present.
    fstCompiler.bytes.writeByte(bytePos++, presenceBits);
    assert bytePos - dest == numPresenceBytes;
}
/**
 * Gets the number of bytes required to flag the presence of each arc in the given label range,
 * one bit per arc.
 */
private static int getNumPresenceBytes(int labelRange) {
    assert labelRange >= 0;
    // Whole bytes plus one extra byte if any bits remain: ceil(labelRange / 8).
    return (labelRange >> 3) + ((labelRange & 0x07) == 0 ? 0 : 1);
}
/**
 * Reads the presence bits of a direct-addressing node. Actually we don't read them here, we just
 * keep the pointer to the bit-table start and we skip them.
 */
private void readPresenceBytes(Arc<T> arc, BytesReader in) throws IOException {
    // Only valid on a fixed-length direct-addressing node header.
    assert arc.bytesPerArc() > 0;
    assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
    arc.bitTableStart = in.getPosition();
    // arc.numArcs() holds the label range here, one presence bit per label.
    in.skipBytes(getNumPresenceBytes(arc.numArcs()));
}
/** Fills virtual 'start' arc, ie, an empty incoming arc to the FST's start node */
public Arc<T> getFirstArc(Arc<T> arc) {
    final T noOutput = outputs.getNoOutput();
    if (emptyOutput == null) {
        arc.flags = BIT_LAST_ARC;
        arc.nextFinalOutput = noOutput;
    } else {
        // The FST accepts the empty string: make the virtual arc final and attach
        // the empty string's output to it.
        arc.flags = BIT_FINAL_ARC | BIT_LAST_ARC;
        arc.nextFinalOutput = emptyOutput;
        if (emptyOutput != noOutput) {
            arc.flags = (byte) (arc.flags() | BIT_ARC_HAS_FINAL_OUTPUT);
        }
    }
    arc.output = noOutput;

    // If there are no nodes, ie, the FST only accepts the
    // empty string, then startNode is 0
    arc.target = startNode;
    return arc;
}
/**
 * Follows the <code>follow</code> arc and reads the last arc of its target; this changes the
 * provided <code>arc</code> (2nd arg) in-place and returns it.
 *
 * @return Returns the second argument (<code>arc</code>).
 */
Arc<T> readLastTargetArc(Arc<T> follow, Arc<T> arc, BytesReader in) throws IOException {
    // System.out.println("readLast");
    if (targetHasArcs(follow) == false) {
        // Target is a sentinel end node: synthesize the final END_LABEL arc.
        // System.out.println("  end node");
        assert follow.isFinal();
        arc.label = END_LABEL;
        arc.target = FINAL_END_NODE;
        arc.output = follow.nextFinalOutput();
        arc.flags = BIT_LAST_ARC;
        arc.nodeFlags = arc.flags;
        return arc;
    } else {
        in.setPosition(follow.target());
        byte flags = arc.nodeFlags = in.readByte();
        if (flags == ARCS_FOR_BINARY_SEARCH || flags == ARCS_FOR_DIRECT_ADDRESSING) {
            // Special arc which is actually a node header for fixed length arcs.
            // Jump straight to end to find the last arc.
            arc.numArcs = in.readVInt();
            if (version >= VERSION_VINT_TARGET) {
                arc.bytesPerArc = in.readVInt();
            } else {
                arc.bytesPerArc = in.readInt();
            }
            // System.out.println("  array numArcs=" + arc.numArcs + " bpa=" + arc.bytesPerArc);
            if (flags == ARCS_FOR_DIRECT_ADDRESSING) {
                readPresenceBytes(arc, in);
                arc.firstLabel = readLabel(in);
                arc.posArcsStart = in.getPosition();
                readLastArcByDirectAddressing(arc, in);
            } else {
                // Position the index just before the last arc; readNextRealArc will
                // increment it and seek to the final fixed-length slot.
                arc.arcIdx = arc.numArcs() - 2;
                arc.posArcsStart = in.getPosition();
                readNextRealArc(arc, in);
            }
        } else {
            arc.flags = flags;
            // non-array: linear scan
            arc.bytesPerArc = 0;
            // System.out.println("  scan");
            while (arc.isLast() == false) {
                // skip this arc:
                readLabel(in);
                if (arc.flag(BIT_ARC_HAS_OUTPUT)) {
                    outputs.skipOutput(in);
                }
                if (arc.flag(BIT_ARC_HAS_FINAL_OUTPUT)) {
                    outputs.skipFinalOutput(in);
                }
                // Only arcs that are neither stop nodes nor target-next store an
                // explicit target address to skip.
                if (arc.flag(BIT_STOP_NODE)) {} else if (arc.flag(BIT_TARGET_NEXT)) {} else {
                    readUnpackedNodeTarget(in);
                }
                arc.flags = in.readByte();
            }
            // Undo the byte flags we read:
            in.skipBytes(-1);

            arc.nextArc = in.getPosition();
            readNextRealArc(arc, in);
        }
        assert arc.isLast();
        return arc;
    }
}
/** Reads an explicit target address: fixed 4-byte int in old versions, vlong since then. */
private long readUnpackedNodeTarget(BytesReader in) throws IOException {
    return version < VERSION_VINT_TARGET ? in.readInt() : in.readVLong();
}
/**
 * Follow the <code>follow</code> arc and read the first arc of its target; this changes the
 * provided <code>arc</code> (2nd arg) in-place and returns it.
 *
 * @return Returns the second argument (<code>arc</code>).
 */
public Arc<T> readFirstTargetArc(Arc<T> follow, Arc<T> arc, BytesReader in) throws IOException {
    // int pos = address;
    // System.out.println("    readFirstTarget follow.target=" + follow.target + " isFinal=" +
    // follow.isFinal());
    if (follow.isFinal()) {
        // Insert "fake" final first arc:
        arc.label = END_LABEL;
        arc.output = follow.nextFinalOutput();
        arc.flags = BIT_FINAL_ARC;
        if (follow.target() <= 0) {
            // Sentinel target: the fake arc is also the last one.
            arc.flags |= BIT_LAST_ARC;
        } else {
            // NOTE: nextArc is a node (not an address!) in this case:
            arc.nextArc = follow.target();
        }
        arc.target = FINAL_END_NODE;
        arc.nodeFlags = arc.flags;
        // System.out.println("    insert isFinal; nextArc=" + follow.target + " isLast=" +
        // arc.isLast() + " output=" + outputs.outputToString(arc.output));
        return arc;
    } else {
        return readFirstRealTargetArc(follow.target(), arc, in);
    }
}
/**
 * Positions at {@code nodeAddress}, decodes the node's header (fixed-length-arcs header or
 * plain first-arc flags), and reads the node's first real arc into {@code arc}.
 */
public Arc<T> readFirstRealTargetArc(long nodeAddress, Arc<T> arc, final BytesReader in) throws IOException {
    in.setPosition(nodeAddress);
    // System.out.println("   flags=" + arc.flags);

    byte flags = arc.nodeFlags = in.readByte();
    if (flags == ARCS_FOR_BINARY_SEARCH || flags == ARCS_FOR_DIRECT_ADDRESSING) {
        // System.out.println("  fixed length arc");
        // Special arc which is actually a node header for fixed length arcs.
        arc.numArcs = in.readVInt();
        if (version >= VERSION_VINT_TARGET) {
            arc.bytesPerArc = in.readVInt();
        } else {
            arc.bytesPerArc = in.readInt();
        }
        // -1 so the first readNextRealArc advances to index 0.
        arc.arcIdx = -1;
        if (flags == ARCS_FOR_DIRECT_ADDRESSING) {
            readPresenceBytes(arc, in);
            arc.firstLabel = readLabel(in);
            arc.presenceIndex = -1;
        }
        arc.posArcsStart = in.getPosition();
        // System.out.println("  bytesPer=" + arc.bytesPerArc + " numArcs=" + arc.numArcs + "
        // arcsStart=" + pos);
    } else {
        // Variable-length arcs: remember the node address so readNextRealArc can
        // re-seek, and mark bytesPerArc=0 to select the linear-scan path.
        arc.nextArc = nodeAddress;
        arc.bytesPerArc = 0;
    }

    return readNextRealArc(arc, in);
}
/**
 * Returns whether <code>arc</code>'s target points to a node in expanded format (fixed length
 * arcs).
 */
boolean isExpandedTarget(Arc<T> follow, BytesReader in) throws IOException {
    if (targetHasArcs(follow) == false) {
        return false;
    }
    in.setPosition(follow.target());
    final byte nodeFlags = in.readByte();
    // Expanded nodes start with one of the two fixed-length-arcs header markers.
    return nodeFlags == ARCS_FOR_BINARY_SEARCH || nodeFlags == ARCS_FOR_DIRECT_ADDRESSING;
}
/** In-place read; returns the arc. */
public Arc<T> readNextArc(Arc<T> arc, BytesReader in) throws IOException {
    if (arc.label() != END_LABEL) {
        return readNextRealArc(arc, in);
    }
    // This was a fake inserted "final" arc; its nextArc holds a node (not an address).
    if (arc.nextArc() <= 0) {
        throw new IllegalArgumentException("cannot readNextArc when arc.isLast()=true");
    }
    return readFirstRealTargetArc(arc.nextArc(), arc, in);
}
/** Peeks at next arc's label; does not alter arc. Do not call this if arc.isLast()! */
int readNextArcLabel(Arc<T> arc, BytesReader in) throws IOException {
    assert arc.isLast() == false;

    if (arc.label() == END_LABEL) {
        // System.out.println("    nextArc fake " + arc.nextArc);
        // Next arc is the first arc of a node.
        // Position to read the first arc label.

        in.setPosition(arc.nextArc());
        byte flags = in.readByte();
        if (flags == ARCS_FOR_BINARY_SEARCH || flags == ARCS_FOR_DIRECT_ADDRESSING) {
            // System.out.println("    nextArc fixed length arc");
            // Special arc which is actually a node header for fixed length arcs.
            int numArcs = in.readVInt();
            if (version >= VERSION_VINT_TARGET) {
                in.readVInt(); // Skip bytesPerArc.
            } else {
                in.readInt(); // Skip bytesPerArc.
            }
            if (flags == ARCS_FOR_BINARY_SEARCH) {
                in.readByte(); // Skip arc flags.
            } else {
                // For direct addressing, numArcs is the label range; skip the bit-table
                // to land on the first label.
                in.skipBytes(getNumPresenceBytes(numArcs));
            }
        }
    } else {
        if (arc.bytesPerArc() != 0) {
            // System.out.println("    nextArc real array");
            // Arcs have fixed length.
            if (arc.nodeFlags() == ARCS_FOR_BINARY_SEARCH) {
                // Point to next arc, -1 to skip arc flags.
                in.setPosition(arc.posArcsStart() - (1 + arc.arcIdx()) * arc.bytesPerArc() - 1);
            } else {
                assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
                // Direct addressing node. The label is not stored but rather inferred
                // based on first label and arc index in the range.
                assert Arc.BitTable.assertIsValid(arc, in);
                assert Arc.BitTable.isBitSet(arc.arcIdx(), arc, in);
                int nextIndex = Arc.BitTable.nextBitSet(arc.arcIdx(), arc, in);
                assert nextIndex != -1;
                return arc.firstLabel() + nextIndex;
            }
        } else {
            // Arcs have variable length.
            // System.out.println("    nextArc real list");
            // Position to next arc, -1 to skip flags.
            in.setPosition(arc.nextArc() - 1);
        }
    }
    return readLabel(in);
}
/**
 * Reads the {@code idx}-th arc of a binary-search (fixed-length-arcs) node directly by offset,
 * updating {@code arc} in place.
 */
public Arc<T> readArcByIndex(Arc<T> arc, final BytesReader in, int idx) throws IOException {
    assert arc.bytesPerArc() > 0;
    assert arc.nodeFlags() == ARCS_FOR_BINARY_SEARCH;
    assert idx >= 0 && idx < arc.numArcs();
    // Arcs are fixed-length and stored in reverse, so index straight to the slot.
    in.setPosition(arc.posArcsStart() - idx * arc.bytesPerArc());
    arc.arcIdx = idx;
    arc.flags = in.readByte();
    return readArc(arc, in);
}
/**
 * Reads a present direct addressing node arc, with the provided index in the label range.
 *
 * @param rangeIndex The index of the arc in the label range. It must be present. The real arc
 *     offset is computed based on the presence bits of the direct addressing node.
 */
public Arc<T> readArcByDirectAddressing(Arc<T> arc, final BytesReader in, int rangeIndex) throws IOException {
    assert Arc.BitTable.assertIsValid(arc, in);
    assert rangeIndex >= 0 && rangeIndex < arc.numArcs();
    assert Arc.BitTable.isBitSet(rangeIndex, arc, in);
    // The arc's physical slot is the count of present arcs before it.
    int presenceIndex = Arc.BitTable.countBitsUpTo(rangeIndex, arc, in);
    return readArcByDirectAddressing(arc, in, rangeIndex, presenceIndex);
}
/**
 * Reads a present direct addressing node arc, with the provided index in the label range and its
 * corresponding presence index (which is the count of presence bits before it).
 */
private Arc<T> readArcByDirectAddressing(Arc<T> arc, final BytesReader in, int rangeIndex, int presenceIndex) throws IOException {
    // Seek to the fixed-length slot of the presenceIndex-th present arc.
    in.setPosition(arc.posArcsStart() - presenceIndex * arc.bytesPerArc());
    arc.arcIdx = rangeIndex;
    arc.presenceIndex = presenceIndex;
    arc.flags = in.readByte();
    return readArc(arc, in);
}
/**
 * Reads the last arc of a direct addressing node. This method is equivalent to call {@link
 * #readArcByDirectAddressing(Arc, BytesReader, int)} with {@code rangeIndex} equal to {@code
 * arc.numArcs() - 1}, but it is faster.
 */
public Arc<T> readLastArcByDirectAddressing(Arc<T> arc, final BytesReader in) throws IOException {
    assert Arc.BitTable.assertIsValid(arc, in);
    // The last present arc sits at slot (total present arcs - 1).
    int presenceIndex = Arc.BitTable.countBits(arc, in) - 1;
    return readArcByDirectAddressing(arc, in, arc.numArcs() - 1, presenceIndex);
}
/** Never returns null, but you should never call this if arc.isLast() is true. */
public Arc<T> readNextRealArc(Arc<T> arc, final BytesReader in) throws IOException {

    // TODO: can't assert this because we call from readFirstArc
    // assert !flag(arc.flags, BIT_LAST_ARC);

    // Dispatch on the node encoding recorded when the node header was read.
    switch (arc.nodeFlags()) {
        case ARCS_FOR_BINARY_SEARCH:
            assert arc.bytesPerArc() > 0;
            arc.arcIdx++;
            assert arc.arcIdx() >= 0 && arc.arcIdx() < arc.numArcs();
            in.setPosition(arc.posArcsStart() - arc.arcIdx() * arc.bytesPerArc());
            arc.flags = in.readByte();
            break;

        case ARCS_FOR_DIRECT_ADDRESSING:
            assert Arc.BitTable.assertIsValid(arc, in);
            assert arc.arcIdx() == -1 || Arc.BitTable.isBitSet(arc.arcIdx(), arc, in);
            // Skip over absent labels to the next present arc in the range.
            int nextIndex = Arc.BitTable.nextBitSet(arc.arcIdx(), arc, in);
            return readArcByDirectAddressing(arc, in, nextIndex, arc.presenceIndex + 1);

        default:
            // Variable length arcs - linear search.
            assert arc.bytesPerArc() == 0;
            in.setPosition(arc.nextArc());
            arc.flags = in.readByte();
    }
    return readArc(arc, in);
}
/**
 * Reads an arc. <br>
 * Precondition: The arc flags byte has already been read and set; the given BytesReader is
 * positioned just after the arc flags byte.
 */
private Arc<T> readArc(Arc<T> arc, BytesReader in) throws IOException {
    if (arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING) {
        // Labels are not stored for direct addressing; derive from first label + index.
        arc.label = arc.firstLabel() + arc.arcIdx();
    } else {
        arc.label = readLabel(in);
    }

    if (arc.flag(BIT_ARC_HAS_OUTPUT)) {
        arc.output = outputs.read(in);
    } else {
        arc.output = outputs.getNoOutput();
    }

    if (arc.flag(BIT_ARC_HAS_FINAL_OUTPUT)) {
        arc.nextFinalOutput = outputs.readFinalOutput(in);
    } else {
        arc.nextFinalOutput = outputs.getNoOutput();
    }

    if (arc.flag(BIT_STOP_NODE)) {
        // Target is a sentinel end node; no target address is stored.
        if (arc.flag(BIT_FINAL_ARC)) {
            arc.target = FINAL_END_NODE;
        } else {
            arc.target = NON_FINAL_END_NODE;
        }
        arc.nextArc = in.getPosition(); // Only useful for list.
    } else if (arc.flag(BIT_TARGET_NEXT)) {
        arc.nextArc = in.getPosition(); // Only useful for list.
        // TODO: would be nice to make this lazy -- maybe
        // caller doesn't need the target and is scanning arcs...
        if (arc.flag(BIT_LAST_ARC) == false) {
            if (arc.bytesPerArc() == 0) {
                // must scan
                seekToNextNode(in);
            } else {
                // Fixed-length node: the target starts right past the last arc slot.
                int numArcs = arc.nodeFlags == ARCS_FOR_DIRECT_ADDRESSING ? Arc.BitTable.countBits(arc, in) : arc.numArcs();
                in.setPosition(arc.posArcsStart() - arc.bytesPerArc() * numArcs);
            }
        }
        arc.target = in.getPosition();
    } else {
        // Explicitly stored target address.
        arc.target = readUnpackedNodeTarget(in);
        arc.nextArc = in.getPosition(); // Only useful for list.
    }
    return arc;
}
/**
 * Synthesizes the END_LABEL arc for a final {@code follow} arc into {@code arc}, or returns
 * null when {@code follow} is not final.
 */
static <T> Arc<T> readEndArc(Arc<T> follow, Arc<T> arc) {
    if (follow.isFinal() == false) {
        return null;
    }
    if (follow.target() <= 0) {
        arc.flags = FST.BIT_LAST_ARC;
    } else {
        arc.flags = 0;
        // NOTE: nextArc is a node (not an address!) in this case:
        arc.nextArc = follow.target();
    }
    arc.output = follow.nextFinalOutput();
    arc.label = FST.END_LABEL;
    return arc;
}
// TODO: could we somehow [partially] tableize arc lookups
// like automaton?

/**
 * Finds an arc leaving the incoming arc, replacing the arc in place. This returns null if the arc
 * was not found, else the incoming arc.
 */
public Arc<T> findTargetArc(int labelToMatch, Arc<T> follow, Arc<T> arc, BytesReader in) throws IOException {

    if (labelToMatch == END_LABEL) {
        // Caller is asking for the virtual final arc; synthesize it if follow is final.
        if (follow.isFinal()) {
            if (follow.target() <= 0) {
                arc.flags = BIT_LAST_ARC;
            } else {
                arc.flags = 0;
                // NOTE: nextArc is a node (not an address!) in this case:
                arc.nextArc = follow.target();
            }
            arc.output = follow.nextFinalOutput();
            arc.label = END_LABEL;
            arc.nodeFlags = arc.flags;
            return arc;
        } else {
            return null;
        }
    }

    if (targetHasArcs(follow) == false) {
        return null;
    }
    in.setPosition(follow.target());

    // System.out.println("fta label=" + (char) labelToMatch);

    byte flags = arc.nodeFlags = in.readByte();
    if (flags == ARCS_FOR_DIRECT_ADDRESSING) {
        arc.numArcs = in.readVInt(); // This is in fact the label range.
        if (version >= VERSION_VINT_TARGET) {
            arc.bytesPerArc = in.readVInt();
        } else {
            arc.bytesPerArc = in.readInt();
        }
        readPresenceBytes(arc, in);
        arc.firstLabel = readLabel(in);
        arc.posArcsStart = in.getPosition();

        // O(1) lookup: the label's offset in the range maps to a presence bit.
        int arcIndex = labelToMatch - arc.firstLabel();
        if (arcIndex < 0 || arcIndex >= arc.numArcs()) {
            return null; // Before or after label range.
        } else if (Arc.BitTable.isBitSet(arcIndex, arc, in) == false) {
            return null; // Arc missing in the range.
        }
        return readArcByDirectAddressing(arc, in, arcIndex);
    } else if (flags == ARCS_FOR_BINARY_SEARCH) {
        arc.numArcs = in.readVInt();
        if (version >= VERSION_VINT_TARGET) {
            arc.bytesPerArc = in.readVInt();
        } else {
            arc.bytesPerArc = in.readInt();
        }
        arc.posArcsStart = in.getPosition();

        // Array is sparse; do binary search:
        int low = 0;
        int high = arc.numArcs() - 1;
        while (low <= high) {
            // System.out.println("    cycle");
            int mid = (low + high) >>> 1;
            // +1 to skip over flags
            in.setPosition(arc.posArcsStart() - (arc.bytesPerArc() * mid + 1));
            int midLabel = readLabel(in);
            final int cmp = midLabel - labelToMatch;
            if (cmp < 0) {
                low = mid + 1;
            } else if (cmp > 0) {
                high = mid - 1;
            } else {
                // Found: set index just before the match; readNextRealArc increments it.
                arc.arcIdx = mid - 1;
                // System.out.println("    found!");
                return readNextRealArc(arc, in);
            }
        }
        return null;
    }

    // Linear scan
    readFirstRealTargetArc(follow.target(), arc, in);

    while (true) {
        // System.out.println("  non-bs cycle");
        // TODO: we should fix this code to not have to create
        // object for the output of every arc we scan... only
        // for the matching arc, if found

        // Arcs are stored in sorted label order, so we can stop early.
        if (arc.label() == labelToMatch) {
            // System.out.println("    found!");
            return arc;
        } else if (arc.label() > labelToMatch) {
            return null;
        } else if (arc.isLast()) {
            return null;
        } else {
            readNextRealArc(arc, in);
        }
    }
}
/** Skips past all remaining arcs of the current node, leaving {@code in} at the next node. */
private void seekToNextNode(BytesReader in) throws IOException {
    for (;;) {
        final int flags = in.readByte();
        readLabel(in);

        if (flag(flags, BIT_ARC_HAS_OUTPUT)) {
            outputs.skipOutput(in);
        }
        if (flag(flags, BIT_ARC_HAS_FINAL_OUTPUT)) {
            outputs.skipFinalOutput(in);
        }
        // An explicit target address is present only when the arc is neither a stop
        // node nor a target-next arc.
        if (flag(flags, BIT_STOP_NODE) == false && flag(flags, BIT_TARGET_NEXT) == false) {
            readUnpackedNodeTarget(in);
        }
        if (flag(flags, BIT_LAST_ARC)) {
            break;
        }
    }
}
/** Returns a {@link BytesReader} for this FST, positioned at position 0. */
public BytesReader getBytesReader() {
    // Off-heap FSTs read through the store; on-heap FSTs read the builder's bytes.
    // Both readers traverse the byte store in reverse, matching how nodes are written.
    return this.fstStore != null ? this.fstStore.getReverseBytesReader() : bytes.getReverseReader();
}
/** Reads bytes stored in an FST. */
public abstract static
|
BitTable
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/sorted/set/SortComparatorTest.java
|
{
"start": 924,
"end": 1828
}
|
class ____ {
@Test
public void testSortComparator(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Owner owner = new Owner();
Cat cat1 = new Cat();
Cat cat2 = new Cat();
cat1.owner = owner;
cat1.name = "B";
cat1.nickname = "B";
cat2.owner = owner;
cat2.name = "a";
cat2.nickname = "a";
owner.cats.add( cat1 );
owner.cats.add( cat2 );
session.persist( owner );
session.getTransaction().commit();
session.clear();
session.beginTransaction();
owner = session.get( Owner.class, owner.id );
assertThat( owner.cats ).isNotNull();
assertThat( owner.cats.size() ).isEqualTo( 2 );
assertThat( owner.cats.first().nickname ).isEqualTo( "a" );
assertThat( owner.cats.last().nickname ).isEqualTo( "B" );
}
);
}
@Entity(name = "Owner")
@Table(name = "Owner")
static
|
SortComparatorTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
|
{
"start": 1061,
"end": 3169
}
|
class ____ implements Authenticator {
/**
* Name of the additional parameter that carries the 'user.name' value.
*/
public static final String USER_NAME = "user.name";
private static final String USER_NAME_EQ = USER_NAME + "=";
private ConnectionConfigurator connConfigurator;
/**
* Sets a {@link ConnectionConfigurator} instance to use for
* configuring connections.
*
* @param configurator the {@link ConnectionConfigurator} instance.
*/
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
connConfigurator = configurator;
}
/**
* Performs simple authentication against the specified URL.
* <p>
* If a token is given it does a NOP and returns the given token.
* <p>
* If no token is given, it will perform an HTTP <code>OPTIONS</code> request injecting an additional
* parameter {@link #USER_NAME} in the query string with the value returned by the {@link #getUserName()}
* method.
* <p>
* If the response is successful it will update the authentication token.
*
* @param url the URl to authenticate against.
* @param token the authentication token being used for the user.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication error occurred.
*/
@Override
public void authenticate(URL url, AuthenticatedURL.Token token) throws IOException, AuthenticationException {
String strUrl = url.toString();
String paramSeparator = (strUrl.contains("?")) ? "&" : "?";
strUrl += paramSeparator + USER_NAME_EQ + getUserName();
url = new URL(strUrl);
HttpURLConnection conn = token.openConnection(url, connConfigurator);
conn.setRequestMethod("OPTIONS");
conn.connect();
AuthenticatedURL.extractToken(conn, token);
}
/**
* Returns the current user name.
* <p>
* This implementation returns the value of the Java system property 'user.name'
*
* @return the current user name.
*/
protected String getUserName() {
return System.getProperty("user.name");
}
}
|
PseudoAuthenticator
|
java
|
quarkusio__quarkus
|
integration-tests/spring-data-jpa/src/main/java/io/quarkus/it/spring/data/jpa/PostRepository.java
|
{
"start": 138,
"end": 337
}
|
interface ____ extends IntermediatePostRepository<Object, Post, Object> {
List<Post> findAllByOrganization(String organization);
long deleteByOrganization(String organization);
}
|
PostRepository
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/primitive_result_type/PrimitiveResultTypeTest.java
|
{
"start": 1059,
"end": 1997
}
|
class ____ {
@BeforeAll
static void setup() throws Exception {
BaseDataTest.runScript(IbatisConfig.getSqlSessionFactory().getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/primitive_result_type/create.sql");
}
@Test
void shouldReturnProperPrimitiveType() {
List<Integer> codes = ProductDAO.selectProductCodes();
for (Object code : codes) {
assertTrue(code instanceof Integer);
}
List<Long> lcodes = ProductDAO.selectProductCodesL();
for (Object lcode : lcodes) {
assertFalse(lcode instanceof Integer);
}
List<BigDecimal> bcodes = ProductDAO.selectProductCodesB();
for (Object bcode : bcodes) {
assertTrue(bcode instanceof BigDecimal);
}
}
@Test
void noErrorThrowOut() {
List<Product> products = ProductDAO.selectAllProducts();
assertEquals(4, products.size(), "should return 4 results");
}
}
|
PrimitiveResultTypeTest
|
java
|
quarkusio__quarkus
|
extensions/amazon-lambda/maven-archetype/src/main/resources/archetype-resources/src/main/java/InputObject.java
|
{
"start": 28,
"end": 445
}
|
class ____ {
private String name;
private String greeting;
public String getName() {
return name;
}
public InputObject setName(String name) {
this.name = name;
return this;
}
public String getGreeting() {
return greeting;
}
public InputObject setGreeting(String greeting) {
this.greeting = greeting;
return this;
}
}
|
InputObject
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/json/arguments/JsonSetArgs.java
|
{
"start": 588,
"end": 775
}
|
class ____ implements CompositeArgument {
private boolean nx;
private boolean xx;
/**
* Builder entry points for {@link JsonSetArgs}.
*/
public static
|
JsonSetArgs
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/taskmanager/TestCheckpointResponder.java
|
{
"start": 1405,
"end": 3336
}
|
class ____ implements CheckpointResponder {
private final List<AcknowledgeReport> acknowledgeReports;
private final List<DeclineReport> declineReports;
private OneShotLatch acknowledgeLatch;
private OneShotLatch declinedLatch;
public TestCheckpointResponder() {
this.acknowledgeReports = new ArrayList<>();
this.declineReports = new ArrayList<>();
}
@Override
public void acknowledgeCheckpoint(
JobID jobID,
ExecutionAttemptID executionAttemptID,
long checkpointId,
CheckpointMetrics checkpointMetrics,
TaskStateSnapshot subtaskState) {
AcknowledgeReport acknowledgeReport =
new AcknowledgeReport(
jobID, executionAttemptID, checkpointId, checkpointMetrics, subtaskState);
acknowledgeReports.add(acknowledgeReport);
if (acknowledgeLatch != null) {
acknowledgeLatch.trigger();
}
}
@Override
public void reportCheckpointMetrics(
JobID jobID,
ExecutionAttemptID executionAttemptID,
long checkpointId,
CheckpointMetrics checkpointMetrics) {}
@Override
public void reportInitializationMetrics(
JobID jobId,
ExecutionAttemptID executionAttemptId,
SubTaskInitializationMetrics initializationMetrics) {}
@Override
public void declineCheckpoint(
JobID jobID,
ExecutionAttemptID executionAttemptID,
long checkpointId,
CheckpointException checkpointException) {
DeclineReport declineReport =
new DeclineReport(jobID, executionAttemptID, checkpointId, checkpointException);
declineReports.add(declineReport);
if (declinedLatch != null) {
declinedLatch.trigger();
}
}
public abstract static
|
TestCheckpointResponder
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/spi-deployment/src/main/java/io/quarkus/rest/client/reactive/spi/ClientResponseFilterBuildItem.java
|
{
"start": 194,
"end": 466
}
|
class ____ extends MultiBuildItem {
private final String className;
public ClientResponseFilterBuildItem(String className) {
this.className = className;
}
public String getClassName() {
return className;
}
}
|
ClientResponseFilterBuildItem
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/parser/taobao/FloatObjectFieldTest.java
|
{
"start": 147,
"end": 377
}
|
class ____ extends TestCase {
public void test_0 () throws Exception {
VO vo = JSON.parseObject("{\"value\":1001}", VO.class);
Assert.assertTrue(1001F == vo.value);
}
public static
|
FloatObjectFieldTest
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/FailOverLoadBalanceMultipleExceptionTest.java
|
{
"start": 1253,
"end": 2888
}
|
class ____ extends ContextTestSupport {
protected MockEndpoint x;
protected MockEndpoint y;
protected MockEndpoint z;
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
x = getMockEndpoint("mock:x");
y = getMockEndpoint("mock:y");
z = getMockEndpoint("mock:z");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start").loadBalance()
.failover(IllegalArgumentException.class, IOException.class, CamelException.class)
.to("direct:x", "direct:y", "direct:z");
from("direct:x").to("mock:x").process(new Processor() {
public void process(Exchange exchange) throws Exception {
throw new CamelExchangeException("Forced", exchange);
}
});
from("direct:y").to("mock:y").process(new Processor() {
public void process(Exchange exchange) throws Exception {
throw new IOException("Forced");
}
});
from("direct:z").to("mock:z");
}
};
}
@Test
public void testMultipledException() throws Exception {
x.expectedMessageCount(1);
y.expectedMessageCount(1);
z.expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
}
|
FailOverLoadBalanceMultipleExceptionTest
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/jackson/DefaultSavedRequestMixin.java
|
{
"start": 1160,
"end": 1619
}
|
class ____ also need to register {@link CookieMixin}.
*
* @author Sebastien Deleuze
* @author Jitendra Singh
* @since 7.0
* @see WebServletJacksonModule
* @see org.springframework.security.jackson.SecurityJacksonModules
*/
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS)
@JsonDeserialize(builder = DefaultSavedRequest.Builder.class)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, getterVisibility = JsonAutoDetect.Visibility.NONE)
abstract
|
you
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/config/runtime/exporter/OtlpExporterLogsConfig.java
|
{
"start": 139,
"end": 203
}
|
interface ____ extends OtlpExporterConfig {
}
|
OtlpExporterLogsConfig
|
java
|
quarkusio__quarkus
|
extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/devui/KubernetesDevUIProcessor.java
|
{
"start": 986,
"end": 2359
}
|
class ____ {
static volatile List<Manifest> manifests;
static final Holder holder = new Holder();
@BuildStep(onlyIf = IsDevelopment.class)
CardPageBuildItem create(CurateOutcomeBuildItem bi) {
CardPageBuildItem pageBuildItem = new CardPageBuildItem();
pageBuildItem.addPage(Page.webComponentPageBuilder()
.title("Kubernetes Manifests")
.componentLink("qwc-kubernetes-manifest.js")
.icon("font-awesome-solid:rocket"));
return pageBuildItem;
}
@BuildStep(onlyIf = IsDevelopment.class)
BuildTimeActionBuildItem createBuildTimeActions() {
BuildTimeActionBuildItem generateManifestActions = new BuildTimeActionBuildItem();
generateManifestActions.addAction("generateManifests", ignored -> {
try {
List<Manifest> manifests = holder.getManifests();
// Avoid relying on databind.
Map<String, String> map = new LinkedHashMap<>();
for (Manifest manifest : manifests) {
map.put(manifest.getName(), manifest.getContent());
}
return map;
} catch (Exception e) {
throw new RuntimeException(e);
}
});
return generateManifestActions;
}
public static final
|
KubernetesDevUIProcessor
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/execution/JobStatusChangedListenerITCase.java
|
{
"start": 12012,
"end": 12349
}
|
class ____
implements JobStatusChangedListenerFactory {
@Override
public JobStatusChangedListener createListener(Context context) {
return new TestingJobStatusChangedListener();
}
}
/** Testing job status changed listener. */
private static
|
TestingJobStatusChangedListenerFactory
|
java
|
playframework__playframework
|
core/play-guice/src/test/java/play/inject/guice/GuiceInjectorBuilderTest.java
|
{
"start": 6930,
"end": 7072
}
|
class ____ extends com.google.inject.AbstractModule {
public void configure() {
bind(A.class).to(A2.class);
}
}
public
|
A2Module
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/type/TimeAndTimestampTest.java
|
{
"start": 2105,
"end": 2211
}
|
class ____ {
@Id
private Long id;
private Time timeValue;
private Timestamp timestampValue;
}
}
|
Event
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/InstanceOfAssertFactoriesTest.java
|
{
"start": 82262,
"end": 83347
}
|
class ____ {
private final Object actual = Sets.set(123, 456, 789);
@Test
void createAssert() {
// WHEN
AbstractCollectionAssert<?, Collection<? extends Integer>, Integer, ObjectAssert<Integer>> result = set(Integer.class).createAssert(actual);
// THEN
result.contains(456, 789);
}
@ParameterizedTest
@MethodSource("valueProviders")
void createAssert_with_ValueProvider(ValueProvider<?> delegate) {
// GIVEN
ValueProvider<?> valueProvider = mockThatDelegatesTo(delegate);
// WHEN
AbstractCollectionAssert<?, Collection<? extends Integer>, Integer, ObjectAssert<Integer>> result = set(Integer.class).createAssert(valueProvider);
// THEN
result.contains(456, 789);
verify(valueProvider).apply(parameterizedType(Set.class, Integer.class));
}
private Stream<ValueProvider<?>> valueProviders() {
return Stream.of(type -> actual,
type -> convert(new String[] { "123", "456", "789" }, type));
}
}
@Nested
@TestInstance(PER_CLASS)
|
Set_Typed_Factory
|
java
|
netty__netty
|
resolver-dns/src/test/java/io/netty/resolver/dns/TestDnsServer.java
|
{
"start": 14369,
"end": 14833
}
|
class ____ extends ResourceRecordImpl {
TestResourceRecord(String domainName, RecordType recordType, Map<String, Object> attributes) {
super(domainName, recordType, RecordClass.IN, 100, attributes);
}
@Override
public int hashCode() {
return System.identityHashCode(this);
}
@Override
public boolean equals(Object o) {
return o == this;
}
}
}
|
TestResourceRecord
|
java
|
google__guice
|
core/test/com/google/inject/spi/HasDependenciesTest.java
|
{
"start": 1041,
"end": 2788
}
|
class ____ extends TestCase {
/** When an instance implements HasDependencies, the injected dependencies aren't used. */
public void testInstanceWithDependencies() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(A.class).toInstance(new AWithDependencies());
}
});
InstanceBinding<?> binding = (InstanceBinding<?>) injector.getBinding(A.class);
assertEquals(
ImmutableSet.<Dependency<?>>of(Dependency.get(Key.get(Integer.class))),
binding.getDependencies());
}
public void testInstanceWithoutDependencies() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(A.class).toInstance(new A());
}
});
InstanceBinding<?> binding = (InstanceBinding<?>) injector.getBinding(A.class);
Dependency<?> onlyDependency = Iterables.getOnlyElement(binding.getDependencies());
assertEquals(Key.get(String.class), onlyDependency.getKey());
}
public void testProvider() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(A.class).toProvider(new ProviderOfA());
}
});
ProviderInstanceBinding<?> binding = (ProviderInstanceBinding<?>) injector.getBinding(A.class);
Dependency<?> onlyDependency = Iterables.getOnlyElement(binding.getDependencies());
assertEquals(Key.get(String.class), onlyDependency.getKey());
}
static
|
HasDependenciesTest
|
java
|
apache__dubbo
|
dubbo-common/src/test/java/org/apache/dubbo/common/status/reporter/MockFrameworkStatusReporter.java
|
{
"start": 908,
"end": 1236
}
|
class ____ implements FrameworkStatusReporter {
Map<String, Object> reportContent = new HashMap<>();
@Override
public void report(String type, Object obj) {
reportContent.put(type, obj);
}
public Map<String, Object> getReportContent() {
return reportContent;
}
}
|
MockFrameworkStatusReporter
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/creators/TestCreators.java
|
{
"start": 7563,
"end": 7994
}
|
class ____ extends HashMap<Object,Object>
{
final int _number;
String _text = "initial";
MapWithCtor() { this(-1, "default"); }
@JsonCreator
public MapWithCtor(@JsonProperty("number") int nr,
@JsonProperty("text") String t)
{
_number = nr;
_text = t;
}
}
@SuppressWarnings("serial")
static
|
MapWithCtor
|
java
|
apache__spark
|
common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/Constants.java
|
{
"start": 852,
"end": 1262
}
|
class ____ {
public static final String SHUFFLE_SERVICE_FETCH_RDD_ENABLED =
"spark.shuffle.service.fetch.rdd.enabled";
/**
* The Spark config defined by the core module cannot be obtained in the current module,
* hard coding is performed here to define `SHUFFLE_SERVICE_DB_BACKEND`.
*/
public static final String SHUFFLE_SERVICE_DB_BACKEND =
"spark.shuffle.service.db.backend";
}
|
Constants
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlPartialEnablementAdOnlyIT.java
|
{
"start": 1220,
"end": 2824
}
|
class ____ extends MlSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(
LocalStateMachineLearningAdOnly.class,
DataStreamsPlugin.class,
ReindexPlugin.class,
IngestCommonPlugin.class,
MockPainlessScriptEngine.TestPlugin.class,
// ILM is required for .ml-state template index settings
IndexLifecycle.class,
// Needed for scaled_float and wildcard fields
MapperExtrasPlugin.class,
Wildcard.class
);
}
/**
* The objective here is to detect if one of these very basic actions relies on some other action that is not available.
* We don't expect them to return anything, but if they are unexpectedly calling an action that has been disabled then
* an exception will be thrown which will fail the test.
*/
public void testBasicInfoCalls() {
client().execute(MlInfoAction.INSTANCE, new MlInfoAction.Request()).actionGet();
client().execute(MlMemoryAction.INSTANCE, new MlMemoryAction.Request("*")).actionGet();
client().execute(GetJobsAction.INSTANCE, new GetJobsAction.Request("*")).actionGet();
client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request("*")).actionGet();
client().execute(GetDatafeedsAction.INSTANCE, new GetDatafeedsAction.Request("*")).actionGet();
client().execute(GetDatafeedsStatsAction.INSTANCE, new GetDatafeedsStatsAction.Request("*")).actionGet();
}
}
|
MlPartialEnablementAdOnlyIT
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/stateless/GetMultipleTest.java
|
{
"start": 691,
"end": 1673
}
|
class ____ {
@Test void test(SessionFactoryScope scope) {
scope.inStatelessTransaction(s-> {
s.insert(new Record(123L,"hello earth"));
s.insert(new Record(456L,"hello mars"));
});
scope.inStatelessTransaction(s-> {
List<Record> all = s.getMultiple(Record.class, List.of(456L, 123L, 2L));
assertEquals("hello mars",all.get(0).message);
assertEquals("hello earth",all.get(1).message);
assertNull(all.get(2));
});
scope.inStatelessTransaction(s-> {
List<Record> all = s.getMultiple(Record.class, List.of(123L, 2L, 456L));
assertEquals("hello earth",all.get(0).message);
assertEquals("hello mars",all.get(2).message);
assertNull(all.get(1));
});
scope.inStatelessTransaction(s-> {
List<Record> all = s.getMultiple(Record.class, List.of(456L, 123L, 2L), PESSIMISTIC_READ);
assertEquals("hello mars",all.get(0).message);
assertEquals("hello earth",all.get(1).message);
assertNull(all.get(2));
});
}
@Entity
static
|
GetMultipleTest
|
java
|
apache__camel
|
components/camel-openstack/src/main/java/org/apache/camel/component/openstack/neutron/producer/RouterProducer.java
|
{
"start": 1705,
"end": 6405
}
|
class ____ extends AbstractOpenstackProducer {
public RouterProducer(NeutronEndpoint endpoint, OSClient client) {
super(endpoint, client);
}
@Override
public void process(Exchange exchange) throws Exception {
final String operation = getOperation(exchange);
switch (operation) {
case OpenstackConstants.CREATE:
doCreate(exchange);
break;
case OpenstackConstants.GET:
doGet(exchange);
break;
case OpenstackConstants.GET_ALL:
doGetAll(exchange);
break;
case OpenstackConstants.UPDATE:
doUpdate(exchange);
break;
case OpenstackConstants.DELETE:
doDelete(exchange);
break;
case NeutronConstants.ATTACH_INTERFACE:
doAttach(exchange);
break;
case NeutronConstants.DETACH_INTERFACE:
doDetach(exchange);
break;
default:
throw new IllegalArgumentException("Unsuproutered operation " + operation);
}
}
private void doCreate(Exchange exchange) {
final Router in = messageToRouter(exchange.getIn());
final Router out = os.networking().router().create(in);
exchange.getIn().setBody(out);
}
private void doGet(Exchange exchange) {
final Message msg = exchange.getIn();
final String id
= msg.getHeader(OpenstackConstants.ID, msg.getHeader(NeutronConstants.ROUTER_ID, String.class), String.class);
StringHelper.notEmpty(id, "Router ID");
final Router result = os.networking().router().get(id);
msg.setBody(result);
}
private void doGetAll(Exchange exchange) {
final List<? extends Router> out = os.networking().router().list();
exchange.getIn().setBody(out);
}
private void doUpdate(Exchange exchange) {
final Message msg = exchange.getIn();
final Router router = messageToRouter(msg);
final Router updatedRouter = os.networking().router().update(router);
msg.setBody(updatedRouter);
}
private void doDelete(Exchange exchange) {
final Message msg = exchange.getIn();
final String id
= msg.getHeader(OpenstackConstants.ID, msg.getHeader(NeutronConstants.ROUTER_ID, String.class), String.class);
StringHelper.notEmpty(id, "Router ID");
final ActionResponse response = os.networking().router().delete(id);
checkFailure(response, exchange, "Delete router with ID " + id);
}
private void doDetach(Exchange exchange) {
final Message msg = exchange.getIn();
final String routerId = msg.getHeader(NeutronConstants.ROUTER_ID, String.class);
final String subnetId = msg.getHeader(NeutronConstants.SUBNET_ID, String.class);
final String portId = msg.getHeader(NeutronConstants.PORT_ID, String.class);
StringHelper.notEmpty(routerId, "Router ID");
RouterInterface iface = os.networking().router().detachInterface(routerId, subnetId, portId);
msg.setBody(iface);
}
private void doAttach(Exchange exchange) {
final Message msg = exchange.getIn();
final String routerId = msg.getHeader(NeutronConstants.ROUTER_ID, String.class);
final String subnetPortId
= msg.getHeader(NeutronConstants.SUBNET_ID, msg.getHeader(NeutronConstants.PORT_ID), String.class);
final AttachInterfaceType type = msg.getHeader(NeutronConstants.ITERFACE_TYPE, AttachInterfaceType.class);
StringHelper.notEmpty(routerId, "Router ID");
StringHelper.notEmpty(subnetPortId, "Subnet/Port ID");
ObjectHelper.notNull(type, "AttachInterfaceType ");
RouterInterface routerInterface = os.networking().router().attachInterface(routerId, type, subnetPortId);
msg.setBody(routerInterface);
}
private Router messageToRouter(Message message) {
Router router = message.getBody(Router.class);
if (router == null) {
Map headers = message.getHeaders();
RouterBuilder builder = Builders.router();
StringHelper.notEmpty(message.getHeader(OpenstackConstants.NAME, String.class), "Name");
builder.name(message.getHeader(OpenstackConstants.NAME, String.class));
if (headers.containsKey(NeutronConstants.TENANT_ID)) {
builder.tenantId(message.getHeader(NeutronConstants.TENANT_ID, String.class));
}
router = builder.build();
}
return router;
}
}
|
RouterProducer
|
java
|
micronaut-projects__micronaut-core
|
graal/src/main/java/io/micronaut/graal/reflect/GraalTypeElementVisitor.java
|
{
"start": 2452,
"end": 9781
}
|
class ____ implements TypeElementVisitor<Object, Object> {
/**
* The position of the visitor.
*/
public static final int POSITION = -200;
private static final TypeHint.AccessType[] DEFAULT_ACCESS_TYPE = {TypeHint.AccessType.ALL_DECLARED_CONSTRUCTORS};
private final boolean isSubclass = getClass() != GraalTypeElementVisitor.class;
/**
* Elements that the config originates from.
*/
private final Set<ClassElement> originatingElements = new HashSet<>();
@Override
public int getOrder() {
return POSITION; // allow mutation of metadata
}
@Override
public Set<String> getSupportedAnnotationNames() {
return Set.of(
ReflectiveAccess.class.getName(),
TypeHint.class.getName(),
Import.class.getName(),
"javax.persistence.Entity",
"jakarta.persistence.Entity",
AnnotationUtil.INJECT,
Inject.class.getName(),
ReflectionConfig.class.getName(),
ReflectionConfig.ReflectionConfigList.class.getName()
);
}
@NonNull
@Override
public VisitorKind getVisitorKind() {
return VisitorKind.ISOLATING;
}
@Override
public TypeElementQuery query() {
return TypeElementQuery.onlyClass();
}
@Override
public void finish(VisitorContext visitorContext) {
originatingElements.clear();
}
@SuppressWarnings("java:S3776")
@Override
public void visitClass(ClassElement element, VisitorContext context) {
if (!isSubclass && !element.hasStereotype(Deprecated.class)) {
if (originatingElements.contains(element)) {
return;
}
var reflectiveClasses = new LinkedHashMap<String, ReflectionConfigData>();
final List<AnnotationValue<ReflectionConfig>> values = element.getAnnotationValuesByType(ReflectionConfig.class);
for (AnnotationValue<ReflectionConfig> value : values) {
value.stringValue("type").ifPresent(n -> {
final ReflectionConfigData data = resolveClassData(n, reflectiveClasses);
data.accessTypes.addAll(
Arrays.asList(value.enumValues("accessType", TypeHint.AccessType.class))
);
data.methods.addAll(
value.getAnnotations("methods")
);
data.fields.addAll(
value.getAnnotations("fields")
);
});
}
if (element.hasAnnotation(ReflectiveAccess.class)) {
final String beanName = element.getName();
addBean(beanName, reflectiveClasses);
element.getDefaultConstructor().ifPresent(constructor -> processMethodElement(constructor, reflectiveClasses));
resolveClassData(beanName + "[]", reflectiveClasses);
}
element.getEnclosedElements(ElementQuery.ALL_METHODS.annotated(ann -> ann.hasAnnotation(ReflectiveAccess.class)))
.forEach(m -> processMethodElement(m, reflectiveClasses));
element.getEnclosedElements(ElementQuery.ALL_FIELDS.annotated(ann -> ann.hasAnnotation(ReflectiveAccess.class)))
.forEach(m -> processFieldElement(m, reflectiveClasses));
if (!element.isInner()) {
// Inner classes aren't processed if there is no annotation
// We might trigger the visitor twice but the originatingElements check should avoid it
element.getEnclosedElements(ElementQuery.ALL_INNER_CLASSES).forEach(c -> visitClass(c, context));
}
if (element.hasAnnotation(TypeHint.class)) {
String[] introspectedClasses = element.stringValues(TypeHint.class);
String[] typeNames = element.stringValues(TypeHint.class, "typeNames");
TypeHint.AccessType[] accessTypes = element.enumValues(TypeHint.class, "accessType", TypeHint.AccessType.class);
if (accessTypes.length == 0) {
accessTypes = DEFAULT_ACCESS_TYPE;
}
if (introspectedClasses.length == 0 && typeNames.length == 0) {
processClasses(accessTypes, reflectiveClasses, element.getName());
} else {
processClasses(accessTypes, reflectiveClasses, introspectedClasses);
processClasses(accessTypes, reflectiveClasses, typeNames);
}
}
if (element.hasAnnotation(Import.class)) {
final List<ClassElement> beanElements = BeanImportVisitor.collectInjectableElements(element, context);
for (ClassElement beanElement : beanElements) {
processBeanElement(reflectiveClasses, beanElement, true);
}
} else if (element.hasStereotype(Bean.class) || element.hasStereotype(AnnotationUtil.SCOPE) || element.hasStereotype(
AnnotationUtil.QUALIFIER)) {
processBeanElement(
reflectiveClasses,
element,
false
);
MethodElement me = element.getPrimaryConstructor().orElse(null);
if (me != null && me.isPrivate() && !me.hasAnnotation(ReflectiveAccess.class)) {
processMethodElement(me, reflectiveClasses);
}
}
if (element.isInner()) {
ClassElement enclosingType = element.getEnclosingType().orElse(null);
if (enclosingType != null && enclosingType.hasAnnotation(ReflectiveAccess.class)) {
final String beanName = element.getName();
addBean(beanName, reflectiveClasses);
resolveClassData(beanName + "[]", reflectiveClasses);
}
}
if (!reflectiveClasses.isEmpty()) {
originatingElements.add(element);
@SuppressWarnings("unchecked") final AnnotationValue<ReflectionConfig>[] annotationValues =
reflectiveClasses.values().stream()
.map(ReflectionConfigData::build)
.toArray(AnnotationValue[]::new);
MutableAnnotationMetadata annotationMetadata = new MutableAnnotationMetadata();
final AnnotationValue<ReflectionConfig.ReflectionConfigList> av =
AnnotationValue.builder(ReflectionConfig.ReflectionConfigList.class)
.values(annotationValues)
.build();
annotationMetadata.addAnnotation(
av.getAnnotationName(),
av.getValues(),
RetentionPolicy.RUNTIME
);
GraalReflectionMetadataWriter writer = new GraalReflectionMetadataWriter(
element,
annotationMetadata,
context
);
try {
writer.accept(context);
} catch (IOException e) {
throw new ClassGenerationException("I/O error occurred during
|
GraalTypeElementVisitor
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/AbstractIntsFromDocValuesBlockLoader.java
|
{
"start": 821,
"end": 2076
}
|
class ____ extends BlockDocValuesReader.DocValuesBlockLoader {
protected final String fieldName;
protected AbstractIntsFromDocValuesBlockLoader(String fieldName) {
this.fieldName = fieldName;
}
@Override
public final Builder builder(BlockFactory factory, int expectedCount) {
return factory.ints(expectedCount);
}
@Override
public final AllReader reader(LeafReaderContext context) throws IOException {
SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName);
if (docValues != null) {
NumericDocValues singleton = DocValues.unwrapSingleton(docValues);
if (singleton != null) {
return singletonReader(singleton);
}
return sortedReader(docValues);
}
NumericDocValues singleton = context.reader().getNumericDocValues(fieldName);
if (singleton != null) {
return singletonReader(singleton);
}
return new ConstantNullsReader();
}
protected abstract AllReader singletonReader(NumericDocValues docValues);
protected abstract AllReader sortedReader(SortedNumericDocValues docValues);
public static
|
AbstractIntsFromDocValuesBlockLoader
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/BitField.java
|
{
"start": 2889,
"end": 10313
}
|
class ____ {
private final int mask;
private final int shiftCount;
/**
* Creates a BitField instance.
*
* @param mask the mask specifying which bits apply to this
* BitField. Bits that are set in this mask are the bits
* that this BitField operates on
*/
public BitField(final int mask) {
this.mask = mask;
this.shiftCount = mask == 0 ? 0 : Integer.numberOfTrailingZeros(mask);
}
/**
* Clears the bits.
*
* @param holder the int data containing the bits we're
* interested in
* @return the value of holder with the specified bits cleared
* (set to {@code 0})
*/
public int clear(final int holder) {
return holder & ~mask;
}
/**
* Clears the bits.
*
* @param holder the byte data containing the bits we're
* interested in
*
* @return the value of holder with the specified bits cleared
* (set to {@code 0})
*/
public byte clearByte(final byte holder) {
return (byte) clear(holder);
}
/**
* Clears the bits.
*
* @param holder the short data containing the bits we're
* interested in
* @return the value of holder with the specified bits cleared
* (set to {@code 0})
*/
public short clearShort(final short holder) {
return (short) clear(holder);
}
/**
* Obtains the value for the specified BitField, unshifted.
*
* @param holder the int data containing the bits we're
* interested in
* @return the selected bits
*/
public int getRawValue(final int holder) {
return holder & mask;
}
/**
* Obtains the value for the specified BitField, unshifted.
*
* @param holder the short data containing the bits we're
* interested in
* @return the selected bits
*/
public short getShortRawValue(final short holder) {
return (short) getRawValue(holder);
}
/**
* Obtains the value for the specified BitField, appropriately
* shifted right, as a short.
*
* <p>Many users of a BitField will want to treat the specified
* bits as an int value, and will not want to be aware that the
* value is stored as a BitField (and so shifted left so many
* bits).</p>
*
* @see #setShortValue(short,short)
* @param holder the short data containing the bits we're
* interested in
* @return the selected bits, shifted right appropriately
*/
public short getShortValue(final short holder) {
return (short) getValue(holder);
}
/**
* Obtains the value for the specified BitField, appropriately
* shifted right.
*
* <p>Many users of a BitField will want to treat the specified
* bits as an int value, and will not want to be aware that the
* value is stored as a BitField (and so shifted left so many
* bits).</p>
*
* @see #setValue(int,int)
* @param holder the int data containing the bits we're interested
* in
* @return the selected bits, shifted right appropriately
*/
public int getValue(final int holder) {
return getRawValue(holder) >> shiftCount;
}
/**
* Tests whether all of the bits are set or not.
*
* <p>This is a stricter test than {@link #isSet(int)},
* in that all of the bits in a multi-bit set must be set
* for this method to return {@code true}.</p>
*
* @param holder the int data containing the bits we're
* interested in
* @return {@code true} if all of the bits are set,
* else {@code false}
*/
public boolean isAllSet(final int holder) {
return (holder & mask) == mask;
}
/**
* Tests whether the field is set or not.
*
* <p>This is most commonly used for a single-bit field, which is
* often used to represent a boolean value; the results of using
* it for a multi-bit field is to determine whether *any* of its
* bits are set.</p>
*
* @param holder the int data containing the bits we're interested
* in
* @return {@code true} if any of the bits are set,
* else {@code false}
*/
public boolean isSet(final int holder) {
return (holder & mask) != 0;
}
/**
* Sets the bits.
*
* @param holder the int data containing the bits we're
* interested in
* @return the value of holder with the specified bits set
* to {@code 1}
*/
public int set(final int holder) {
return holder | mask;
}
/**
* Sets a boolean BitField.
*
* @param holder the int data containing the bits we're
* interested in
* @param flag indicating whether to set or clear the bits
* @return the value of holder with the specified bits set or
* cleared
*/
public int setBoolean(final int holder, final boolean flag) {
return flag ? set(holder) : clear(holder);
}
/**
* Sets the bits.
*
* @param holder the byte data containing the bits we're
* interested in
*
* @return the value of holder with the specified bits set
* to {@code 1}
*/
public byte setByte(final byte holder) {
return (byte) set(holder);
}
/**
* Sets a boolean BitField.
*
* @param holder the byte data containing the bits we're
* interested in
* @param flag indicating whether to set or clear the bits
* @return the value of holder with the specified bits set or
* cleared
*/
public byte setByteBoolean(final byte holder, final boolean flag) {
return flag ? setByte(holder) : clearByte(holder);
}
/**
* Sets the bits.
*
* @param holder the short data containing the bits we're
* interested in
* @return the value of holder with the specified bits set
* to {@code 1}
*/
public short setShort(final short holder) {
return (short) set(holder);
}
/**
* Sets a boolean BitField.
*
* @param holder the short data containing the bits we're
* interested in
* @param flag indicating whether to set or clear the bits
* @return the value of holder with the specified bits set or
* cleared
*/
public short setShortBoolean(final short holder, final boolean flag) {
return flag ? setShort(holder) : clearShort(holder);
}
/**
* Replaces the bits with new values.
*
* @see #getShortValue(short)
* @param holder the short data containing the bits we're
* interested in
* @param value the new value for the specified bits
* @return the value of holder with the bits from the value
* parameter replacing the old bits
*/
public short setShortValue(final short holder, final short value) {
return (short) setValue(holder, value);
}
/**
* Replaces the bits with new values.
*
* @see #getValue(int)
* @param holder the int data containing the bits we're
* interested in
* @param value the new value for the specified bits
* @return the value of holder with the bits from the value
* parameter replacing the old bits
*/
public int setValue(final int holder, final int value) {
return holder & ~mask | value << shiftCount & mask;
}
}
|
BitField
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/http/VirtualThreadHttpTest.java
|
{
"start": 1126,
"end": 5440
}
|
class ____ extends VertxTestBase {
private VertxInternal vertx;
public void setUp() throws Exception {
super.setUp();
vertx = (VertxInternal) super.vertx;
}
@Test
public void testHttpClient1() throws Exception {
Assume.assumeTrue(isVirtualThreadAvailable());
HttpServer server = vertx.createHttpServer();
server.requestHandler(req -> {
req.response().end("Hello World");
});
server.listen(8088, "localhost").await(10, TimeUnit.SECONDS);
vertx.createVirtualThreadContext().runOnContext(v -> {
HttpClient client = vertx.createHttpClient();
for (int i = 0; i < 100; ++i) {
HttpClientRequest req = client.request(HttpMethod.GET, 8088, "localhost", "/").await();
HttpClientResponse resp = req.send().await();
Buffer body = resp.body().await();
String bodyString = body.toString(StandardCharsets.UTF_8);
assertEquals("Hello World", bodyString);
}
testComplete();
});
await();
}
@Test
public void testHttpClient2() throws Exception {
Assume.assumeTrue(isVirtualThreadAvailable());
waitFor(100);
HttpServer server = vertx.createHttpServer();
server.requestHandler(req -> {
req.response().end("Hello World");
});
server.listen(8088, "localhost").await(10, TimeUnit.SECONDS);
HttpClient client = vertx.createHttpClient();
vertx.createVirtualThreadContext().runOnContext(v -> {
for (int i = 0; i < 100; ++i) {
client.request(HttpMethod.GET, 8088, "localhost", "/").onSuccess(req -> {
HttpClientResponse resp = req.send().await();
StringBuffer body = new StringBuffer();
resp.handler(buff -> {
body.append(buff.toString());
});
resp.endHandler(v2 -> {
assertEquals("Hello World", body.toString());
complete();
});
});
}
});
try {
await();
} finally {
server.close().await();
client.close().await();
}
}
@Test
public void testHttpClient3() throws Exception {
Assume.assumeTrue(isVirtualThreadAvailable());
HttpServer server = vertx.createHttpServer();
int numChunks = 10;
List<String> expected = IntStream.range(0, numChunks).mapToObj(idx -> "chunk-" + idx).collect(Collectors.toList());
server.requestHandler(req -> {
HttpServerResponse response = req.response();
response.setChunked(true);
Deque<String> toSend = new ArrayDeque<>(expected);
vertx.setPeriodic(10, id -> {
String chunk = toSend.poll();
if (chunk != null) {
response.write(chunk);
} else {
vertx.cancelTimer(id);
response.end();
}
});
});
server.listen(8088, "localhost").await(10, TimeUnit.SECONDS);
vertx.createVirtualThreadContext().runOnContext(v -> {
HttpClient client = vertx.createHttpClient();
for (int i = 0; i < 10; ++i) {
HttpClientRequest req = client.request(HttpMethod.GET, 8088, "localhost", "/").await();
HttpClientResponse resp = req.send().await();
List<String> chunks = new ArrayList<>();
resp.blockingStream().forEach(chunk -> {
chunks.add(chunk.toString());
});
assertEquals(expected, chunks);
}
testComplete();
});
await();
}
@Test
public void testHttpClientTimeout() throws Exception {
Assume.assumeTrue(isVirtualThreadAvailable());
HttpServer server = vertx.createHttpServer();
server.requestHandler(req -> {
});
server.listen(8088, "localhost").await(10, TimeUnit.SECONDS);
vertx.createVirtualThreadContext().runOnContext(v -> {
HttpClient client = vertx.createHttpClient();
ContextInternal ctx = vertx.getOrCreateContext();
HttpClientRequest req = client.request(HttpMethod.GET, 8088, "localhost", "/").await();
PromiseInternal<HttpClientResponse> promise = ctx.promise();
req.send().onComplete(promise);
Exception failure = new Exception("Too late");
vertx.setTimer(500, id -> promise.tryFail(failure));
try {
HttpClientResponse resp = promise.future().await();
} catch (Exception e) {
assertSame(failure, e);
testComplete();
}
});
await();
}
}
|
VirtualThreadHttpTest
|
java
|
quarkusio__quarkus
|
docs/src/main/java/io/quarkus/docs/generation/AssembleDownstreamDocumentation.java
|
{
"start": 24937,
"end": 27025
}
|
enum ____, group(2) is the tooltip text for the value
if (mr.group(3) != null) {
// group(3) is a comma that means there are still more values after this one
// So in this case, replace it with two newlines to visually separate items
return "*" + mr.group(1) + "*: " + mr.group(2) + "\n\n";
} else {
return "*" + mr.group(1) + "*: " + mr.group(2);
}
});
return content;
}
private static String escapeXrefTitleForReplaceAll(String title) {
return title.trim().replace("]", "\\\\]");
}
private static String trimReference(String reference) {
reference = normalizeAdoc(reference);
if (reference.startsWith("#")) {
return reference.substring(1);
}
if (reference.contains(".adoc")) {
return reference;
}
if (reference.contains("#")) {
int hashIndex = reference.indexOf('#');
return reference.substring(0, hashIndex) + ".adoc" + reference.substring(hashIndex);
}
return reference;
}
private static String getQualifiedReference(String fileName, String reference) {
reference = normalizeAdoc(reference);
if (reference.startsWith("#")) {
return fileName + reference;
}
if (reference.contains(".adoc")) {
return reference;
}
if (reference.contains("#")) {
int hashIndex = reference.indexOf('#');
return reference.substring(0, hashIndex) + ".adoc" + reference.substring(hashIndex);
}
return fileName + "#" + reference;
}
private static String normalizeAdoc(String adoc) {
if (adoc.startsWith("./")) {
return adoc.substring(2);
}
return adoc;
}
private static void addError(Map<String, List<String>> errors, String fileName, String error) {
errors.computeIfAbsent(fileName, f -> new ArrayList<>())
.add(error);
}
public static
|
value
|
java
|
netty__netty
|
example/src/main/java/io/netty/example/telnet/TelnetServer.java
|
{
"start": 1129,
"end": 1971
}
|
class ____ {
static final boolean SSL = System.getProperty("ssl") != null;
static final int PORT = Integer.parseInt(System.getProperty("port", SSL? "8992" : "8023"));
public static void main(String[] args) throws Exception {
// Configure SSL.
final SslContext sslCtx = ServerUtil.buildSslContext();
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
ServerBootstrap b = new ServerBootstrap();
b.group(group)
.channel(NioServerSocketChannel.class)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new TelnetServerInitializer(sslCtx));
b.bind(PORT).sync().channel().closeFuture().sync();
} finally {
group.shutdownGracefully();
}
}
}
|
TelnetServer
|
java
|
quarkusio__quarkus
|
extensions/security/runtime/src/main/java/io/quarkus/security/runtime/interceptor/AuthenticatedInterceptor.java
|
{
"start": 503,
"end": 877
}
|
class ____ {
@Inject
SecurityHandler handler;
@Inject
AuthorizationController controller;
@AroundInvoke
public Object intercept(InvocationContext ic) throws Exception {
if (controller.isAuthorizationEnabled()) {
return handler.handle(ic);
} else {
return ic.proceed();
}
}
}
|
AuthenticatedInterceptor
|
java
|
spring-projects__spring-boot
|
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/immutable/ImmutableSimpleProperties.java
|
{
"start": 1262,
"end": 2035
}
|
interface ____ still be injected because it might have a converter
private final Comparator<?> comparator;
// Even if it is not exposed, we're still offering a way to bind the value through the
// constructor, so it should be present in the metadata
@SuppressWarnings("unused")
private final Long counter;
@TestConstructorBinding
public ImmutableSimpleProperties(@TestDefaultValue("boot") String theName, boolean flag, Comparator<?> comparator,
Long counter) {
this.theName = theName;
this.flag = flag;
this.comparator = comparator;
this.counter = counter;
}
public String getTheName() {
return this.theName;
}
@Deprecated
public boolean isFlag() {
return this.flag;
}
public Comparator<?> getComparator() {
return this.comparator;
}
}
|
can
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/query/ScriptScoreQueryBuilderTests.java
|
{
"start": 1627,
"end": 7414
}
|
class ____ extends AbstractQueryTestCase<ScriptScoreQueryBuilder> {
@Override
protected ScriptScoreQueryBuilder doCreateTestQueryBuilder() {
String scriptStr = "1";
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, scriptStr, Collections.emptyMap());
ScriptScoreQueryBuilder queryBuilder = new ScriptScoreQueryBuilder(RandomQueryBuilder.createQuery(random()), script);
if (randomBoolean()) {
queryBuilder.setMinScore(randomFloat());
}
return queryBuilder;
}
@Override
protected ScriptScoreQueryBuilder createQueryWithInnerQuery(QueryBuilder queryBuilder) {
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap());
return new ScriptScoreQueryBuilder(queryBuilder, script);
}
@Override
protected void doAssertLuceneQuery(ScriptScoreQueryBuilder queryBuilder, Query query, SearchExecutionContext context)
throws IOException {
Query wrappedQuery = queryBuilder.query().rewrite(context).toQuery(context);
if (wrappedQuery instanceof MatchNoDocsQuery) {
assertThat(query, instanceOf(MatchNoDocsQuery.class));
} else {
assertThat(query, instanceOf(ScriptScoreQuery.class));
}
}
public void testFromJson() throws IOException {
String json = """
{
"script_score" : {
"query" : { "match_all" : {} },
"script" : {
"source" : "doc['field'].value"
},
"min_score" : 2.0
}
}""";
ScriptScoreQueryBuilder parsed = (ScriptScoreQueryBuilder) parseQuery(json);
assertEquals(json, 2, parsed.getMinScore(), 0.0001);
}
public void testIllegalArguments() {
String scriptStr = "1";
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, scriptStr, Collections.emptyMap());
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ScriptScoreQueryBuilder(matchAllQuery(), null));
assertEquals("script_score: script must not be null", e.getMessage());
e = expectThrows(IllegalArgumentException.class, () -> new ScriptScoreQueryBuilder(null, script));
assertEquals("script_score: query must not be null", e.getMessage());
}
/**
* Check that this query is cacheable
*/
@Override
public void testCacheability() throws IOException {
Directory directory = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), directory);
iw.addDocument(new Document());
final IndexSearcher searcher = newSearcher(iw.getReader());
iw.close();
assertThat(searcher.getIndexReader().leaves().size(), greaterThan(0));
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap());
ScriptScoreQueryBuilder queryBuilder = new ScriptScoreQueryBuilder(new TermQueryBuilder(KEYWORD_FIELD_NAME, "value"), script);
SearchExecutionContext context = createSearchExecutionContext(searcher);
QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new SearchExecutionContext(context));
Query luceneQuery = rewriteQuery.toQuery(context);
assertNotNull(luceneQuery);
assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable());
// test query cache
if (rewriteQuery instanceof MatchNoneQueryBuilder == false) {
Weight queryWeight = context.searcher().createWeight(searcher.rewrite(luceneQuery), ScoreMode.COMPLETE, 1.0f);
for (LeafReaderContext ctx : context.getIndexReader().leaves()) {
assertFalse("" + searcher.rewrite(luceneQuery) + " " + rewriteQuery.toString(), queryWeight.isCacheable(ctx));
}
}
searcher.getIndexReader().close();
directory.close();
}
@Override
public void testMustRewrite() throws IOException {
SearchExecutionContext context = createSearchExecutionContext();
context.setAllowUnmappedFields(true);
TermQueryBuilder termQueryBuilder = new TermQueryBuilder("unmapped_field", "foo");
String scriptStr = "1";
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, scriptStr, Collections.emptyMap());
ScriptScoreQueryBuilder scriptScoreQueryBuilder = new ScriptScoreQueryBuilder(termQueryBuilder, script);
IllegalStateException e = expectThrows(IllegalStateException.class, () -> scriptScoreQueryBuilder.toQuery(context));
assertEquals("Rewrite first", e.getMessage());
}
public void testRewriteToMatchNone() throws IOException {
Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap());
ScriptScoreQueryBuilder builder = new ScriptScoreQueryBuilder(new TermQueryBuilder("unmapped_field", "value"), script);
QueryBuilder rewrite = builder.rewrite(createSearchExecutionContext());
assertThat(rewrite, instanceOf(MatchNoneQueryBuilder.class));
}
public void testDisallowExpensiveQueries() {
SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
when(searchExecutionContext.allowExpensiveQueries()).thenReturn(false);
ScriptScoreQueryBuilder queryBuilder = doCreateTestQueryBuilder();
ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> queryBuilder.toQuery(searchExecutionContext));
assertEquals("[script score] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage());
}
}
|
ScriptScoreQueryBuilderTests
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/TestStreamStateHandle.java
|
{
"start": 992,
"end": 1213
}
|
interface ____ extends StreamStateHandle {
default PhysicalStateHandleID getStreamStateHandleID() {
return new PhysicalStateHandleID(Integer.toString(System.identityHashCode(this)));
}
}
|
TestStreamStateHandle
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsApacheHttpClient.java
|
{
"start": 2382,
"end": 9174
}
|
class ____ implements Closeable {
/**
* ApacheHttpClient instance that executes HTTP request.
*/
private final CloseableHttpClient httpClient;
/**
* Flag to indicate if the client is usable. This is a JVM level flag, state of
* this flag is shared across all instances of fileSystems. Once switched off,
* the ApacheHttpClient would not be used for whole JVM lifecycle.
*/
private static boolean usable = true;
/**
* Registers the switch off of ApacheHttpClient for all future use in the JVM.
*/
static void registerFallback() {
usable = false;
}
/**
* In case, getting success response from apache client, sets the usable flag to true.
*/
static void setUsable() {
usable = true;
}
/**
* @return if ApacheHttpClient is usable.
*/
static boolean usable() {
return usable;
}
AbfsApacheHttpClient(DelegatingSSLSocketFactory delegatingSSLSocketFactory,
final AbfsConfiguration abfsConfiguration,
final KeepAliveCache keepAliveCache,
URL baseUrl,
final boolean isCacheWarmupNeeded) {
final AbfsConnectionManager connMgr = new AbfsConnectionManager(
createSocketFactoryRegistry(
new SSLConnectionSocketFactory(delegatingSSLSocketFactory,
getDefaultHostnameVerifier())),
new AbfsHttpClientConnectionFactory(), keepAliveCache,
abfsConfiguration, baseUrl, isCacheWarmupNeeded);
final HttpClientBuilder builder = HttpClients.custom();
builder.setConnectionManager(connMgr)
.setRequestExecutor(
// In case of Expect:100-continue, the timeout for waiting for
// the 100-continue response from the server is set using
// ExpectWaitContinueTimeout. For other requests, the read timeout
// is set using SocketTimeout.
new AbfsManagedHttpRequestExecutor(
abfsConfiguration.isExpectHeaderEnabled()
? abfsConfiguration.getExpect100ContinueWaitTimeout()
: abfsConfiguration.getHttpReadTimeout()))
.disableContentCompression()
.disableRedirectHandling()
.disableAutomaticRetries()
/*
* To prevent the read of system property http.agent. The agent is set
* in request headers by AbfsClient. System property read is an
* overhead.
*/
.setUserAgent(EMPTY_STRING);
httpClient = builder.build();
}
@Override
public void close() throws IOException {
if (httpClient != null) {
httpClient.close();
}
}
/**
* Executes the HTTP request.
*
* @param httpRequest HTTP request to execute.
* @param abfsHttpClientContext HttpClient context.
* @param connectTimeout Connection timeout.
* @param readTimeout Read timeout.
*
* @return HTTP response.
* @throws IOException network error.
*/
public HttpResponse execute(HttpRequestBase httpRequest,
final AbfsManagedHttpClientContext abfsHttpClientContext,
final int connectTimeout,
final int readTimeout,
final long tailLatencyTimeout) throws IOException {
if (tailLatencyTimeout <= 0) {
return executeWithoutDeadline(httpRequest, abfsHttpClientContext,
connectTimeout, readTimeout);
}
return executeWithDeadline(httpRequest, abfsHttpClientContext,
connectTimeout, readTimeout, tailLatencyTimeout);
}
/**
* Executes the HTTP request.
*
* @param httpRequest HTTP request to execute.
* @param abfsHttpClientContext HttpClient context.
* @param connectTimeout Connection timeout.
* @param readTimeout Read timeout.
*
* @return HTTP response.
* @throws IOException network error.
*/
private HttpResponse executeWithoutDeadline(HttpRequestBase httpRequest,
final AbfsManagedHttpClientContext abfsHttpClientContext,
final int connectTimeout,
final int readTimeout) throws IOException {
RequestConfig.Builder requestConfigBuilder = RequestConfig
.custom()
.setConnectTimeout(connectTimeout)
.setSocketTimeout(readTimeout);
httpRequest.setConfig(requestConfigBuilder.build());
return httpClient.execute(httpRequest, abfsHttpClientContext);
}
/**
* Executes the HTTP request with a deadline. If the request does not complete
* within the deadline, it is aborted and an IOException is thrown.
*
* @param httpRequest HTTP request to execute.
* @param abfsHttpClientContext HttpClient context.
* @param connectTimeout Connection timeout.
* @param readTimeout Read timeout.
* @param deadlineMillis Deadline in milliseconds.
*
* @return HTTP response.
* @throws IOException network error or deadline exceeded.
*/
private HttpResponse executeWithDeadline(HttpRequestBase httpRequest,
final AbfsManagedHttpClientContext abfsHttpClientContext,
final int connectTimeout,
final int readTimeout,
final long deadlineMillis) throws IOException {
RequestConfig.Builder requestConfigBuilder = RequestConfig
.custom()
.setConnectTimeout(connectTimeout)
.setSocketTimeout(readTimeout);
httpRequest.setConfig(requestConfigBuilder.build());
ExecutorService executor = Executors.newSingleThreadExecutor();
Future<HttpResponse> future = executor.submit(() ->
httpClient.execute(httpRequest, abfsHttpClientContext)
);
try {
return future.get(deadlineMillis, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
/* Deadline exceeded, abort the request.
* This will also kill the underlying socket exception in the HttpClient.
* Connection will be marked stale and won't be returned back to KAC for reuse.
*/
httpRequest.abort();
throw new TailLatencyRequestTimeoutException(e);
} catch (Exception e) {
// Any other exception from execution should be thrown as IOException.
throw new IOException("Request execution with deadline failed", e);
} finally {
executor.shutdownNow();
}
}
/**
* Creates the socket factory registry for HTTP and HTTPS.
*
* @param sslSocketFactory SSL socket factory.
* @return Socket factory registry.
*/
private Registry<ConnectionSocketFactory> createSocketFactoryRegistry(
ConnectionSocketFactory sslSocketFactory) {
if (sslSocketFactory == null) {
return RegistryBuilder.<ConnectionSocketFactory>create()
.register(HTTP_SCHEME,
PlainConnectionSocketFactory.getSocketFactory())
.build();
}
return RegistryBuilder.<ConnectionSocketFactory>create()
.register(HTTP_SCHEME, PlainConnectionSocketFactory.getSocketFactory())
.register(HTTPS_SCHEME, sslSocketFactory)
.build();
}
}
|
AbfsApacheHttpClient
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LeafQueueTemplateInfo.java
|
{
"start": 1545,
"end": 1661
}
|
class ____ the LeafQueue Template configuration.
*/
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public
|
stores
|
java
|
apache__maven
|
compat/maven-model-builder/src/main/java/org/apache/maven/model/interpolation/reflection/ClassMap.java
|
{
"start": 2069,
"end": 3732
}
|
class ____ whose methods are cached by this map.
*/
Class<?> getCachedClass() {
return clazz;
}
/**
* <p>Find a Method using the methodKey provided.</p>
* <p>Look in the methodMap for an entry. If found,
* it'll either be a CACHE_MISS, in which case we
* simply give up, or it'll be a Method, in which
* case, we return it.</p>
* <p>If nothing is found, then we must actually go
* and introspect the method from the MethodMap.</p>
* @param name Method name.
* @param params Method parameters.
* @return The found method.
* @throws MethodMap.AmbiguousException in case of duplicate methods.
*/
public Method findMethod(String name, Object... params) throws MethodMap.AmbiguousException {
String methodKey = makeMethodKey(name, params);
Object cacheEntry = methodCache.get(methodKey);
if (cacheEntry == CACHE_MISS) {
return null;
}
if (cacheEntry == null) {
try {
cacheEntry = methodMap.find(name, params);
} catch (MethodMap.AmbiguousException ae) {
// that's a miss :)
methodCache.put(methodKey, CACHE_MISS);
throw ae;
}
if (cacheEntry == null) {
methodCache.put(methodKey, CACHE_MISS);
} else {
methodCache.put(methodKey, cacheEntry);
}
}
// Yes, this might just be null.
return (Method) cacheEntry;
}
/**
* Populate the Map of direct hits. These
* are taken from all the public methods
* that our
|
object
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeAction.java
|
{
"start": 874,
"end": 1941
}
|
class ____ extends BaseRestHandler {
private static final String CONNECTOR_ID_PARAM = "connector_id";
@Override
public String getName() {
return "connector_update_native_action";
}
@Override
public List<Route> routes() {
return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_native"));
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
try (XContentParser parser = restRequest.contentParser()) {
UpdateConnectorNativeAction.Request request = UpdateConnectorNativeAction.Request.fromXContent(
parser,
restRequest.param(CONNECTOR_ID_PARAM)
);
return channel -> client.execute(
UpdateConnectorNativeAction.INSTANCE,
request,
new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status)
);
}
}
}
|
RestUpdateConnectorNativeAction
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/QueryTimeOutTest.java
|
{
"start": 1709,
"end": 8421
}
|
class ____ extends BaseSessionFactoryFunctionalTest {
private static final PreparedStatementSpyConnectionProvider CONNECTION_PROVIDER =
new PreparedStatementSpyConnectionProvider();
private static final String QUERY = "update AnEntity set name='abc'";
private String expectedSqlQuery;
@Override
protected Class[] getAnnotatedClasses() {
return new Class[] {AnEntity.class};
}
@Override
protected void applySettings(StandardServiceRegistryBuilder builer) {
ConnectionProvider connectionProvider = (ConnectionProvider) builer.getSettings()
.get( AvailableSettings.CONNECTION_PROVIDER );
CONNECTION_PROVIDER.setConnectionProvider( connectionProvider );
builer.applySetting( AvailableSettings.CONNECTION_PROVIDER, CONNECTION_PROVIDER );
}
@BeforeEach
public void before() {
CONNECTION_PROVIDER.clear();
SessionFactoryImplementor sessionFactoryImplementor = sessionFactory();
final JdbcType jdbcType = sessionFactoryImplementor.getTypeConfiguration().getJdbcTypeRegistry()
.getDescriptor(
Types.VARCHAR
);
final String baseQuery;
if ( DialectContext.getDialect() instanceof OracleDialect ) {
baseQuery = "update AnEntity ae1_0 set ae1_0.name=?";
}
else if ( DialectContext.getDialect() instanceof SybaseDialect ) {
baseQuery = "update AnEntity set name=? from AnEntity ae1_0";
}
else if ( DialectContext.getDialect() instanceof AbstractTransactSQLDialect ) {
baseQuery = "update ae1_0 set name=? from AnEntity ae1_0";
}
else if ( DialectContext.getDialect()
.getDmlTargetColumnQualifierSupport() == DmlTargetColumnQualifierSupport.NONE ) {
baseQuery = "update AnEntity set name=?";
}
else {
baseQuery = "update AnEntity ae1_0 set name=?";
}
expectedSqlQuery = baseQuery.replace(
"?",
jdbcType.getJdbcLiteralFormatter( StringJavaType.INSTANCE )
.toJdbcLiteral(
"abc",
sessionFactoryImplementor.getJdbcServices().getDialect(),
sessionFactoryImplementor.getWrapperOptions()
)
);
}
@Test
@JiraKey(value = "HHH-12075")
public void testCreateQuerySetTimeout() {
inTransaction( session -> {
MutationQuery query = session.createMutationQuery( QUERY );
query.setTimeout( 123 );
query.executeUpdate();
try {
List<Object[]> setQueryTimeoutCalls = CONNECTION_PROVIDER.spyContext.getCalls(
Statement.class.getMethod( "setQueryTimeout", int.class ),
CONNECTION_PROVIDER.getPreparedStatement( expectedSqlQuery )
);
assertEquals( 2, setQueryTimeoutCalls.size() );
assertEquals( 123, setQueryTimeoutCalls.get( 0 )[0] );
assertEquals( 0, setQueryTimeoutCalls.get( 1 )[0] );
}
catch (Exception ex) {
fail( "should not have thrown exceptioinTransaction( session -> {n" );
}
}
);
}
@Test
@JiraKey(value = "HHH-12075")
public void testCreateQuerySetTimeoutHint() {
inTransaction( session -> {
MutationQuery query = session.createMutationQuery( QUERY );
query.setHint( HINT_SPEC_QUERY_TIMEOUT, 123000 );
query.executeUpdate();
try {
List<Object[]> setQueryTimeoutCalls = CONNECTION_PROVIDER.spyContext.getCalls(
Statement.class.getMethod( "setQueryTimeout", int.class ),
CONNECTION_PROVIDER.getPreparedStatement( expectedSqlQuery )
);
assertEquals( 2, setQueryTimeoutCalls.size() );
assertEquals( 123, setQueryTimeoutCalls.get( 0 )[0] );
assertEquals( 0, setQueryTimeoutCalls.get( 1 )[0] );
}
catch (Exception ex) {
fail( "should not have thrown exception" );
}
}
);
}
@Test
@JiraKey(value = "HHH-12075")
public void testCreateNativeQuerySetTimeout() {
inTransaction( session -> {
NativeQuery query = session.createNativeQuery( QUERY );
query.setTimeout( 123 );
query.executeUpdate();
try {
List<Object[]> setQueryTimeoutCalls = CONNECTION_PROVIDER.spyContext.getCalls(
Statement.class.getMethod( "setQueryTimeout", int.class ),
CONNECTION_PROVIDER.getPreparedStatement( QUERY )
);
assertEquals( 2, setQueryTimeoutCalls.size() );
assertEquals( 123, setQueryTimeoutCalls.get( 0 )[0] );
assertEquals( 0, setQueryTimeoutCalls.get( 1 )[0] );
}
catch (Exception ex) {
fail( "should not have thrown exception" );
}
}
);
}
@Test
@JiraKey(value = "HHH-12075")
public void testCreateNativeQuerySetTimeoutHint() {
inTransaction( session -> {
NativeQuery query = session.createNativeQuery( QUERY );
query.setHint( HINT_SPEC_QUERY_TIMEOUT, 123000 );
query.executeUpdate();
try {
List<Object[]> setQueryTimeoutCalls = CONNECTION_PROVIDER.spyContext.getCalls(
Statement.class.getMethod( "setQueryTimeout", int.class ),
CONNECTION_PROVIDER.getPreparedStatement( QUERY )
);
assertEquals( 2, setQueryTimeoutCalls.size() );
assertEquals( 123, setQueryTimeoutCalls.get( 0 )[0] );
assertEquals( 0, setQueryTimeoutCalls.get( 1 )[0] );
}
catch (Exception ex) {
fail( "should not have thrown exception" );
}
}
);
}
@Test
@JiraKey(value = "HHH-12075")
public void testCreateSQLQuerySetTimeout() {
inTransaction( session -> {
NativeQuery query = session.createNativeQuery( QUERY );
query.setTimeout( 123 );
query.executeUpdate();
try {
List<Object[]> setQueryTimeoutCalls = CONNECTION_PROVIDER.spyContext.getCalls(
Statement.class.getMethod( "setQueryTimeout", int.class ),
CONNECTION_PROVIDER.getPreparedStatement( QUERY )
);
assertEquals( 2, setQueryTimeoutCalls.size() );
assertEquals( 123, setQueryTimeoutCalls.get( 0 )[0] );
assertEquals( 0, setQueryTimeoutCalls.get( 1 )[0] );
}
catch (Exception ex) {
fail( "should not have thrown exception" );
}
}
);
}
@Test
@JiraKey(value = "HHH-12075")
public void testCreateSQLQuerySetTimeoutHint() {
inTransaction( session -> {
NativeQuery query = session.createNativeQuery( QUERY );
query.setHint( HINT_SPEC_QUERY_TIMEOUT, 123000 );
query.executeUpdate();
try {
List<Object[]> setQueryTimeoutCalls = CONNECTION_PROVIDER.spyContext.getCalls(
Statement.class.getMethod( "setQueryTimeout", int.class ),
CONNECTION_PROVIDER.getPreparedStatement( QUERY )
);
assertEquals( 2, setQueryTimeoutCalls.size() );
assertEquals( 123, setQueryTimeoutCalls.get( 0 )[0] );
assertEquals( 0, setQueryTimeoutCalls.get( 1 )[0] );
}
catch (Exception ex) {
fail( "should not have thrown exception" );
}
}
);
}
@Entity(name = "AnEntity")
@Table(name = "AnEntity")
public static
|
QueryTimeOutTest
|
java
|
micronaut-projects__micronaut-core
|
inject-groovy/src/main/groovy/io/micronaut/ast/groovy/visitor/GroovyElementFactory.java
|
{
"start": 9217,
"end": 9396
}
|
class ____ be a GroovyClassElement");
}
return new GroovyFieldElement(visitorContext, (GroovyClassElement) owningType, field, annotationMetadataFactory);
}
}
|
must
|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/common/YamlHelper.java
|
{
"start": 1280,
"end": 1769
}
|
class ____ {
private YamlHelper() {
}
/**
* Creates new Yaml instance. The implementation provided by Snakeyaml is not thread-safe. It is better to create a
* fresh instance for every YAML stream.
*/
public static Yaml yaml() {
return yaml(null);
}
/**
* Creates new Yaml instance. The implementation provided by Snakeyaml is not thread-safe. It is better to create a
* fresh instance for every YAML stream. Uses the given
|
YamlHelper
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/OverriddenFormulasAnnotation.java
|
{
"start": 651,
"end": 1768
}
|
class ____
implements DialectOverride.Formulas, RepeatableContainer<DialectOverride.Formula> {
private DialectOverride.Formula[] value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public OverriddenFormulasAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public OverriddenFormulasAnnotation(DialectOverride.Formulas annotation, ModelsContext modelContext) {
this.value = extractJdkValue( annotation, DIALECT_OVERRIDE_FORMULAS, "value", modelContext );
}
/**
* Used in creating annotation instances from Jandex variant
*/
public OverriddenFormulasAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.value = (DialectOverride.Formula[]) attributeValues.get( "value" );
}
@Override
public DialectOverride.Formula[] value() {
return value;
}
@Override
public void value(DialectOverride.Formula[] value) {
this.value = value;
}
@Override
public Class<? extends Annotation> annotationType() {
return DialectOverride.Formulas.class;
}
}
|
OverriddenFormulasAnnotation
|
java
|
micronaut-projects__micronaut-core
|
core-processor/src/main/java/io/micronaut/inject/ast/utils/EnclosedElementsQuery.java
|
{
"start": 2193,
"end": 2246
}
|
class ____.
*
* @param classElement The
|
element
|
java
|
apache__camel
|
core/camel-management/src/test/java/org/apache/camel/management/ManagedBrowsableEndpointAsJSonTest.java
|
{
"start": 1560,
"end": 8201
}
|
class ____ extends ManagementTestSupport {
@Test
public void testBrowseableEndpointAsJSonIncludeBody() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(1);
Map<String, Object> headers = new HashMap<>();
headers.put("user", false);
headers.put("uid", 123);
headers.put("title", "Camel rocks");
template.sendBodyAndHeaders("direct:start", "Hello World", headers);
assertMockEndpointsSatisfied();
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
String out = (String) mbeanServer.invoke(name, "browseMessageAsJSon", new Object[] { 0, true },
new String[] { "java.lang.Integer", "java.lang.Boolean" });
assertNotNull(out);
log.info(out);
assertTrue(out.contains("\"body\": {"));
assertTrue(out.contains("\"value\": \"Hello World\""));
}
@Test
public void testBrowseableEndpointAsJSon() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(2);
template.sendBodyAndHeader("direct:start", "Hello World", "foo", 123);
template.sendBodyAndHeader("direct:start", "Bye World", "foo", 456);
assertMockEndpointsSatisfied();
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
String out = (String) mbeanServer.invoke(name, "browseMessageAsJSon", new Object[] { 0, false },
new String[] { "java.lang.Integer", "java.lang.Boolean" });
assertNotNull(out);
assertFalse(out.contains("\"body\": {"));
assertTrue(out.contains("\"value\": 123"));
out = (String) mbeanServer.invoke(name, "browseMessageAsJSon", new Object[] { 1, false },
new String[] { "java.lang.Integer", "java.lang.Boolean" });
assertNotNull(out);
assertFalse(out.contains("\"body\": {"));
assertTrue(out.contains("\"value\": 456"));
}
@Test
public void testBrowseableEndpointAsJSonAllIncludeBody() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(2);
template.sendBody("direct:start", "Hello World");
template.sendBodyAndHeader("direct:start", "Bye World", "foo", 456);
assertMockEndpointsSatisfied();
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
String out = (String) mbeanServer.invoke(name, "browseAllMessagesAsJSon", new Object[] { true },
new String[] { "java.lang.Boolean" });
assertNotNull(out);
log.info(out);
assertTrue(out.contains("\"value\": \"Hello World\""));
assertTrue(out.contains("\"value\": \"Bye World\""));
}
@Test
public void testBrowseableEndpointAsJSonAll() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(2);
template.sendBodyAndHeader("direct:start", "Hello World", "foo", 123);
template.sendBodyAndHeader("direct:start", "Bye World", "foo", 456);
assertMockEndpointsSatisfied();
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
String out = (String) mbeanServer.invoke(name, "browseAllMessagesAsJSon", new Object[] { false },
new String[] { "java.lang.Boolean" });
assertNotNull(out);
log.info(out);
assertFalse(out.contains("\"body\": {"));
assertTrue(out.contains("\"value\": 123"));
assertTrue(out.contains("\"value\": 456"));
}
@Test
public void testBrowseableEndpointAsJSonRangeIncludeBody() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(3);
template.sendBody("direct:start", "Hello World");
template.sendBodyAndHeader("direct:start", "Bye World", "foo", 456);
template.sendBody("direct:start", "Hi Camel");
assertMockEndpointsSatisfied();
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
String out = (String) mbeanServer.invoke(name, "browseRangeMessagesAsJSon", new Object[] { 0, 1, true },
new String[] { "java.lang.Integer", "java.lang.Integer", "java.lang.Boolean" });
assertNotNull(out);
log.info(out);
assertTrue(out.contains("\"value\": \"Hello World\""));
assertTrue(out.contains("\"value\": \"Bye World\""));
}
@Test
public void testBrowseableEndpointAsJSonRange() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(3);
template.sendBodyAndHeader("direct:start", "Hello World", "foo", 123);
template.sendBodyAndHeader("direct:start", "Bye World", "foo", 456);
template.sendBody("direct:start", "Hi Camel");
assertMockEndpointsSatisfied();
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
String out = (String) mbeanServer.invoke(name, "browseRangeMessagesAsJSon", new Object[] { 0, 1, false },
new String[] { "java.lang.Integer", "java.lang.Integer", "java.lang.Boolean" });
assertNotNull(out);
log.info(out);
assertFalse(out.contains("\"body\": {"));
assertTrue(out.contains("\"value\": 123"));
assertTrue(out.contains("\"value\": 456"));
}
@Test
public void testBrowseableEndpointAsJSonRangeInvalidIndex() throws Exception {
MBeanServer mbeanServer = getMBeanServer();
ObjectName name = getCamelObjectName(TYPE_ENDPOINT, "mock://result");
try {
mbeanServer.invoke(name, "browseRangeMessagesAsJSon", new Object[] { 3, 1, false },
new String[] { "java.lang.Integer", "java.lang.Integer", "java.lang.Boolean" });
fail("Should have thrown exception");
} catch (Exception e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("From index cannot be larger than to index, was: 3 > 1", e.getCause().getMessage());
}
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.setUseBreadcrumb(false);
from("direct:start").to("mock:result");
}
};
}
}
|
ManagedBrowsableEndpointAsJSonTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/ternary/Site.java
|
{
"start": 220,
"end": 926
}
|
class ____ {
private String name;
private String description;
private Set employees = new HashSet();
private Set managers = new HashSet();
Site() {}
public Site(String name) {
this.name=name;
}
public Set getManagers() {
return managers;
}
public void setManagers(Set managers) {
this.managers = managers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Set getEmployees() {
return employees;
}
public void setEmployees(Set employees) {
this.employees = employees;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
Site
|
java
|
micronaut-projects__micronaut-core
|
context/src/test/groovy/io/micronaut/runtime/event/annotation/AsyncListener.java
|
{
"start": 846,
"end": 1293
}
|
class ____ {
boolean invoked = false;
@EventListener
@Async
CompletableFuture<Boolean> onStartup(StartupEvent event) {
try {
Thread.currentThread().sleep(500);
} catch (InterruptedException e) {
e.printStackTrace();
}
invoked = true;
return CompletableFuture.completedFuture(invoked);
}
public boolean isInvoked() {
return invoked;
}
}
|
AsyncListener
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tcks/lang-model-tck-runner/src/test/java/io/quarkus/arc/tck/LangModelTckExtension.java
|
{
"start": 436,
"end": 881
}
|
class ____ implements BuildCompatibleExtension {
@Discovery
public void addClass(ScannedClasses scan) {
// `LangModelVerifier` has no bean defining annotation
// and isn't discovered in annotated discovery
scan.add(LangModelVerifier.class.getName());
}
@Enhancement(types = LangModelVerifier.class)
public void run(ClassInfo clazz) {
LangModelVerifier.verify(clazz);
}
}
|
LangModelTckExtension
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/context/JavaConfigTests.java
|
{
"start": 3454,
"end": 4113
}
|
class ____ implements WebMvcConfigurer {
@Autowired
private RootConfig rootConfig;
@Bean
PersonController personController() {
return new PersonController(this.rootConfig.personDao());
}
@Override
public void addResourceHandlers(ResourceHandlerRegistry registry) {
registry.addResourceHandler("/resources/**").addResourceLocations("/resources/");
}
@Override
public void addViewControllers(ViewControllerRegistry registry) {
registry.addViewController("/").setViewName("home");
}
@Override
public void configureDefaultServletHandling(DefaultServletHandlerConfigurer configurer) {
configurer.enable();
}
}
}
|
WebConfig
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
|
{
"start": 1988,
"end": 8279
}
|
class ____ extends ViewFileSystemBaseTest {
private static FileSystem fsDefault;
private static MiniDFSCluster cluster;
private static final int NAME_SPACES_COUNT = 3;
private static final int DATA_NODES_COUNT = 3;
private static final int FS_INDEX_DEFAULT = 0;
private static final String LINK_MERGE_SLASH_CLUSTER_1_NAME = "ClusterLMS1";
private static final String LINK_MERGE_SLASH_CLUSTER_2_NAME = "ClusterLMS2";
private static final FileSystem[] FS_HDFS = new FileSystem[NAME_SPACES_COUNT];
private static final Configuration CONF = new Configuration();
private static final File TEST_DIR = GenericTestUtils.getTestDir(
TestViewFileSystemLinkMergeSlash.class.getSimpleName());
private static final String TEST_TEMP_PATH =
"/tmp/TestViewFileSystemLinkMergeSlash";
private final static Logger LOG = LoggerFactory.getLogger(
TestViewFileSystemLinkMergeSlash.class);
@Override
protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper(TEST_TEMP_PATH);
}
@BeforeAll
public static void clusterSetupAtBeginning() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
true);
cluster = new MiniDFSCluster.Builder(CONF)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
NAME_SPACES_COUNT))
.numDataNodes(DATA_NODES_COUNT)
.build();
cluster.waitClusterUp();
for (int i = 0; i < NAME_SPACES_COUNT; i++) {
FS_HDFS[i] = cluster.getFileSystem(i);
}
fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
}
@AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Override
@BeforeEach
public void setUp() throws Exception {
fsTarget = fsDefault;
super.setUp();
}
/**
* Override this so that we don't set the targetTestRoot to any path under the
* root of the FS, and so that we don't try to delete the test dir, but rather
* only its contents.
*/
@Override
void initializeTargetTestRoot() throws IOException {
targetTestRoot = fsDefault.makeQualified(new Path("/"));
for (FileStatus status : fsDefault.listStatus(targetTestRoot)) {
fsDefault.delete(status.getPath(), true);
}
}
@Override
void setupMountPoints() {
super.setupMountPoints();
ConfigUtil.addLinkMergeSlash(conf, LINK_MERGE_SLASH_CLUSTER_1_NAME,
targetTestRoot.toUri());
ConfigUtil.addLinkMergeSlash(conf, LINK_MERGE_SLASH_CLUSTER_2_NAME,
targetTestRoot.toUri());
}
@Override
int getExpectedDelegationTokenCount() {
return 1; // all point to the same fs so 1 unique token
}
@Override
int getExpectedDelegationTokenCountWithCredentials() {
return 1;
}
@Test
public void testConfLinkMergeSlash() throws Exception {
TEST_DIR.mkdirs();
String clusterName = "ClusterMerge";
URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, clusterName,
"/", null, null);
String testFileName = "testLinkMergeSlash";
File infile = new File(TEST_DIR, testFileName);
final byte[] content = "HelloWorld".getBytes();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infile);
fos.write(content);
} finally {
if (fos != null) {
fos.close();
}
}
assertEquals((long)content.length, infile.length());
Configuration conf = new Configuration();
ConfigUtil.addLinkMergeSlash(conf, clusterName, TEST_DIR.toURI());
FileSystem vfs = FileSystem.get(viewFsUri, conf);
assertEquals(ViewFileSystem.class, vfs.getClass());
FileStatus stat = vfs.getFileStatus(new Path(viewFsUri.toString() +
testFileName));
LOG.info("File stat: " + stat);
vfs.close();
}
@Test
public void testConfLinkMergeSlashWithRegularLinks() throws Exception {
TEST_DIR.mkdirs();
String clusterName = "ClusterMerge";
String expectedErrorMsg1 = "Mount table ClusterMerge has already been " +
"configured with a merge slash link";
String expectedErrorMsg2 = "Mount table ClusterMerge has already been " +
"configured with regular links";
URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, clusterName,
"/", null, null);
Configuration conf = new Configuration();
ConfigUtil.addLinkMergeSlash(conf, clusterName, TEST_DIR.toURI());
ConfigUtil.addLink(conf, clusterName, "testDir", TEST_DIR.toURI());
try {
FileSystem.get(viewFsUri, conf);
fail("Shouldn't allow both merge slash link and regular link on same "
+ "mount table.");
} catch (IOException e) {
assertTrue(e.getMessage().contains(expectedErrorMsg1) || e.getMessage()
.contains(expectedErrorMsg2), "Unexpected error message: " + e.getMessage());
}
}
@Test
public void testConfLinkMergeSlashWithMountPoint() throws Exception {
TEST_DIR.mkdirs();
Configuration conf = new Configuration();
String clusterName = "ClusterX";
String mountPoint = "/user";
URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, clusterName,
"/", null, null);
String expectedErrorMsg = "Invalid linkMergeSlash entry in config: " +
"linkMergeSlash./user";
String mountTableEntry = Constants.CONFIG_VIEWFS_PREFIX + "."
+ clusterName + "." + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH
+ "." + mountPoint;
conf.set(mountTableEntry, TEST_DIR.toURI().toString());
try {
FileSystem.get(viewFsUri, conf);
fail("Shouldn't allow linkMergeSlash to take extra mount points!");
} catch (IOException e) {
assertTrue(e.getMessage().contains(expectedErrorMsg));
}
}
@Test
public void testChildFileSystems() throws Exception {
URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME,
LINK_MERGE_SLASH_CLUSTER_1_NAME, "/", null, null);
FileSystem fs = FileSystem.get(viewFsUri, conf);
FileSystem[] childFs = fs.getChildFileSystems();
assertEquals(1, childFs.length, "Unexpected number of child filesystems!");
assertEquals(DistributedFileSystem.class, childFs[0].getClass(),
"Unexpected child filesystem!");
}
}
|
TestViewFileSystemLinkMergeSlash
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/SealedTypesWithSubtypesTest.java
|
{
"start": 3221,
"end": 3457
}
|
class ____ extends Interm1125 {
public int c;
public Impl1125() { }
protected Impl1125(int a0, int b0, int c0) {
a = a0;
b = b0;
c = c0;
}
}
static final
|
Impl1125
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/shard/DocsStatsTests.java
|
{
"start": 868,
"end": 2708
}
|
class ____ extends AbstractXContentTestCase<DocsStats> {
public void testUninitialisedShards() {
DocsStats stats = new DocsStats(0, 0, -1);
assertThat(stats.getTotalSizeInBytes(), equalTo(-1L));
stats.add(new DocsStats(0, 0, -1));
assertThat(stats.getTotalSizeInBytes(), equalTo(-1L));
stats.add(new DocsStats(1, 0, 10));
assertThat(stats.getTotalSizeInBytes(), equalTo(10L));
stats.add(new DocsStats(0, 0, -1));
assertThat(stats.getTotalSizeInBytes(), equalTo(10L));
stats.add(new DocsStats(1, 0, 20));
assertThat(stats.getTotalSizeInBytes(), equalTo(30L));
}
public void testSerialize() throws Exception {
DocsStats originalStats = new DocsStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
try (BytesStreamOutput out = new BytesStreamOutput()) {
originalStats.writeTo(out);
BytesReference bytes = out.bytes();
try (StreamInput in = bytes.streamInput()) {
DocsStats cloneStats = new DocsStats(in);
assertThat(cloneStats.getCount(), equalTo(originalStats.getCount()));
assertThat(cloneStats.getDeleted(), equalTo(originalStats.getDeleted()));
assertThat(cloneStats.getTotalSizeInBytes(), equalTo(originalStats.getTotalSizeInBytes()));
}
}
}
@Override
protected DocsStats createTestInstance() {
return new DocsStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
}
@Override
protected DocsStats doParseInstance(XContentParser parser) throws IOException {
return DocsStats.PARSER.parse(parser, null);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
}
|
DocsStatsTests
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/TopicSubscriptionChangeEvent.java
|
{
"start": 1227,
"end": 1764
}
|
class ____ extends SubscriptionChangeEvent {
private final Set<String> topics;
public TopicSubscriptionChangeEvent(final Set<String> topics, final Optional<ConsumerRebalanceListener> listener, final long deadlineMs) {
super(Type.TOPIC_SUBSCRIPTION_CHANGE, listener, deadlineMs);
this.topics = topics;
}
public Set<String> topics() {
return topics;
}
@Override
public String toStringBase() {
return super.toStringBase() + ", topics=" + topics;
}
}
|
TopicSubscriptionChangeEvent
|
java
|
quarkusio__quarkus
|
extensions/security/runtime/src/test/java/io/quarkus/security/runtime/QuarkusIdentityProviderManagerImplTest.java
|
{
"start": 2652,
"end": 2747
}
|
class ____ extends BaseAuthenticationRequest {
}
abstract static
|
TestAuthenticationRequest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/jdbc/spi/MutationStatementPreparer.java
|
{
"start": 409,
"end": 1964
}
|
interface ____ {
/**
* Prepare a statement.
*
* @param sql The SQL the statement to be prepared
* @param isCallable Whether to prepare as a callable statement.
*
* @return the prepared statement
*/
PreparedStatement prepareStatement(String sql, boolean isCallable);
/**
* Prepare an INSERT statement, specifying how auto-generated (by the database) keys should be handled. Really this
* is a boolean, but JDBC opted to define it instead using 2 int constants:
* <ul>
* <li>{@link PreparedStatement#RETURN_GENERATED_KEYS}</li>
* <li>{@link PreparedStatement#NO_GENERATED_KEYS}</li>
* </ul>
* <p>
* Generated keys are accessed afterwards via {@link PreparedStatement#getGeneratedKeys}
*
* @param sql The INSERT SQL
* @param autoGeneratedKeys The autoGeneratedKeys flag
*
* @return the prepared statement
*
* @see java.sql.Connection#prepareStatement(String, int)
*/
PreparedStatement prepareStatement(String sql, int autoGeneratedKeys);
/**
* Prepare an INSERT statement, specifying columns which are auto-generated values to be returned.
* Generated keys are accessed afterwards via {@link PreparedStatement#getGeneratedKeys}
*
* @param sql - the SQL for the statement to be prepared
* @param columnNames The name of the columns to be returned in the generated keys result set.
*
* @return the prepared statement
*
* @see java.sql.Connection#prepareStatement(String, String[])
*/
PreparedStatement prepareStatement(String sql, String[] columnNames);
}
|
MutationStatementPreparer
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TargetApplicationsNamespace.java
|
{
"start": 2048,
"end": 3790
}
|
class ____ implements
Evaluable<TargetApplications> {
public final static String NAMESPACE_DELIMITER = "/";
private AllocationTagNamespaceType nsType;
// Namespace scope value will be delay binding by eval method.
private Set<ApplicationId> nsScope;
public TargetApplicationsNamespace(AllocationTagNamespaceType
allocationTagNamespaceType) {
this.nsType = allocationTagNamespaceType;
}
protected void setScopeIfNotNull(Set<ApplicationId> appIds) {
if (appIds != null) {
this.nsScope = appIds;
}
}
/**
* Get the type of the namespace.
* @return namespace type.
*/
public AllocationTagNamespaceType getNamespaceType() {
return nsType;
}
/**
* Get the scope of the namespace, in form of a set of applications.
*
* @return a set of applications.
*/
public Set<ApplicationId> getNamespaceScope() {
if (this.nsScope == null) {
throw new IllegalStateException("Invalid namespace scope,"
+ " it is not initialized. Evaluate must be called before"
+ " a namespace can be consumed.");
}
return this.nsScope;
}
/**
* Evaluate the namespace against given target applications
* if it is necessary. Only self/not-self/app-label namespace types
* require this evaluation step, because they are not binding to a
* specific scope during initiating. So we do lazy binding for them
* in this method.
*
* @param target a generic type target that impacts this evaluation.
* @throws InvalidAllocationTagsQueryException if given string is not in valid format.
*/
@Override
public void evaluate(TargetApplications target)
throws InvalidAllocationTagsQueryException {
// Sub-
|
TargetApplicationsNamespace
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.