language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | core/camel-core-reifier/src/main/java/org/apache/camel/reifier/RemovePropertiesReifier.java | {
"start": 1158,
"end": 2392
} | class ____ extends ProcessorReifier<RemovePropertiesDefinition> {
public RemovePropertiesReifier(Route route, ProcessorDefinition<?> definition) {
super(route, (RemovePropertiesDefinition) definition);
}
@Override
public Processor createProcessor() throws Exception {
ObjectHelper.notNull(definition.getPattern(), "patterns", this);
RemovePropertiesProcessor answer;
if (definition.getExcludePatterns() != null) {
answer = new RemovePropertiesProcessor(
parseString(definition.getPattern()), parseStrings(definition.getExcludePatterns()));
} else if (definition.getExcludePattern() != null) {
answer = new RemovePropertiesProcessor(
parseString(definition.getPattern()), parseStrings(new String[] { definition.getExcludePattern() }));
} else {
answer = new RemovePropertiesProcessor(parseString(definition.getPattern()), null);
}
answer.setDisabled(isDisabled(camelContext, definition));
return answer;
}
private String[] parseStrings(String[] array) {
return Stream.of(array).map(this::parseString).toArray(String[]::new);
}
}
| RemovePropertiesReifier |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java | {
"start": 2481,
"end": 22656
} | class ____ extends AbstractQueryTestCase<NestedQueryBuilder> {
private static final String VECTOR_FIELD = "vector";
private static final int VECTOR_DIMENSION = 3;
@Override
protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
mapperService.merge(
"_doc",
new CompressedXContent(
Strings.toString(
PutMappingRequest.simpleMapping(
TEXT_FIELD_NAME,
"type=text",
INT_FIELD_NAME,
"type=integer",
DOUBLE_FIELD_NAME,
"type=double",
BOOLEAN_FIELD_NAME,
"type=boolean",
DATE_FIELD_NAME,
"type=date",
OBJECT_FIELD_NAME,
"type=object",
GEO_POINT_FIELD_NAME,
"type=geo_point",
"nested1",
"type=nested"
)
)
),
MapperService.MergeReason.MAPPING_UPDATE
);
XContentBuilder builder = XContentFactory.jsonBuilder()
.startObject()
.startObject("properties")
.startObject("nested1")
.field("type", "nested")
.startObject("properties")
.startObject(VECTOR_FIELD)
.field("type", "dense_vector")
.field("dims", VECTOR_DIMENSION)
.field("index", true)
.field("similarity", "cosine")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject();
mapperService.merge(
MapperService.SINGLE_MAPPING_NAME,
new CompressedXContent(Strings.toString(builder)),
MapperService.MergeReason.MAPPING_UPDATE
);
}
/**
* @return a {@link NestedQueryBuilder} with random values all over the place
*/
@Override
protected NestedQueryBuilder doCreateTestQueryBuilder() {
QueryBuilder innerQueryBuilder = RandomQueryBuilder.createQuery(random());
NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values()));
nqb.ignoreUnmapped(randomBoolean());
if (randomBoolean()) {
nqb.innerHit(
new InnerHitBuilder(randomAlphaOfLengthBetween(1, 10)).setSize(randomIntBetween(0, 100))
.addSort(new FieldSortBuilder(INT_FIELD_NAME).order(SortOrder.ASC))
.setIgnoreUnmapped(nqb.ignoreUnmapped())
);
}
return nqb;
}
@Override
protected NestedQueryBuilder createQueryWithInnerQuery(QueryBuilder queryBuilder) {
return new NestedQueryBuilder("path", queryBuilder, ScoreMode.None);
}
@Override
protected void doAssertLuceneQuery(NestedQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException {
assertThat(query, instanceOf(ESToParentBlockJoinQuery.class));
// TODO how to assert this?
if (queryBuilder.innerHit() != null) {
// have to rewrite again because the provided queryBuilder hasn't been rewritten (directly returned from
// doCreateTestQueryBuilder)
queryBuilder = (NestedQueryBuilder) queryBuilder.rewrite(context);
assertNotNull(context);
Map<String, InnerHitContextBuilder> innerHitInternals = new HashMap<>();
InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitInternals);
assertTrue(innerHitInternals.containsKey(queryBuilder.innerHit().getName()));
InnerHitContextBuilder innerHits = innerHitInternals.get(queryBuilder.innerHit().getName());
assertEquals(innerHits.innerHitBuilder(), queryBuilder.innerHit());
}
}
/**
* Test (de)serialization on all previous released versions
*/
public void testSerializationBWC() throws IOException {
for (TransportVersion version : TransportVersionUtils.allReleasedVersions()) {
NestedQueryBuilder testQuery = createTestQueryBuilder();
assertSerialization(testQuery, version);
}
}
public void testValidate() {
QueryBuilder innerQuery = RandomQueryBuilder.createQuery(random());
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> QueryBuilders.nestedQuery(null, innerQuery, ScoreMode.Avg)
);
assertThat(e.getMessage(), equalTo("[nested] requires 'path' field"));
e = expectThrows(IllegalArgumentException.class, () -> QueryBuilders.nestedQuery("foo", null, ScoreMode.Avg));
assertThat(e.getMessage(), equalTo("[nested] requires 'query' field"));
e = expectThrows(IllegalArgumentException.class, () -> QueryBuilders.nestedQuery("foo", innerQuery, null));
assertThat(e.getMessage(), equalTo("[nested] requires 'score_mode' field"));
}
public void testParseDefaultsRemoved() throws IOException {
/*
* This json includes many defaults. When we parse the query and then
* call toString on it all of the defaults are removed.
*/
String json = """
{
"nested" : {
"query" : {
"bool" : {
"must" : [ {
"match" : {
"obj1.name" : {
"query" : "blue",
"operator" : "OR",
"prefix_length" : 0,
"max_expansions" : 50,
"fuzzy_transpositions" : true,
"lenient" : false,
"zero_terms_query" : "NONE",
"auto_generate_synonyms_phrase_query" : true,
"boost" : 1.0
}
}
}, {
"range" : {
"obj1.count" : {
"gt" : 5,
"boost" : 1.0
}
}
} ],
"boost" : 1.0
}
},
"path" : "obj1",
"ignore_unmapped" : false,
"score_mode" : "avg",
"boost" : 1.0
}
}""";
NestedQueryBuilder parsed = (NestedQueryBuilder) parseQuery(json);
checkGeneratedJson("""
{
"nested" : {
"query" : {
"bool" : {
"must" : [ {
"match" : {
"obj1.name" : {
"query" : "blue"
}
}
}, {
"range" : {
"obj1.count" : {
"gt" : 5,
"boost" : 1.0
}
}
} ],
"boost" : 1.0
}
},
"path" : "obj1",
"ignore_unmapped" : false,
"score_mode" : "avg",
"boost" : 1.0
}
}""", parsed);
assertEquals(json, ScoreMode.Avg, parsed.scoreMode());
}
@Override
public void testMustRewrite() throws IOException {
SearchExecutionContext context = createSearchExecutionContext();
context.setAllowUnmappedFields(true);
TermQueryBuilder innerQueryBuilder = new TermQueryBuilder("nested1.unmapped_field", "foo");
NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder(
"nested1",
innerQueryBuilder,
RandomPicks.randomFrom(random(), ScoreMode.values())
);
IllegalStateException e = expectThrows(IllegalStateException.class, () -> nestedQueryBuilder.toQuery(context));
assertEquals("Rewrite first", e.getMessage());
}
public void testKnnRewriteForInnerHits() throws IOException {
SearchExecutionContext context = createSearchExecutionContext();
context.setAllowUnmappedFields(true);
KnnVectorQueryBuilder innerQueryBuilder = new KnnVectorQueryBuilder(
"nested1." + VECTOR_FIELD,
new float[] { 1.0f, 2.0f, 3.0f },
null,
1,
10f,
null,
null
);
NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder(
"nested1",
innerQueryBuilder,
RandomPicks.randomFrom(random(), ScoreMode.values())
);
InnerHitsRewriteContext rewriteContext = new InnerHitsRewriteContext(context.getParserConfig(), context::nowInMillis);
QueryBuilder queryBuilder = Rewriteable.rewrite(nestedQueryBuilder, rewriteContext, true);
assertTrue(queryBuilder instanceof NestedQueryBuilder);
NestedQueryBuilder rewritten = (NestedQueryBuilder) queryBuilder;
assertTrue(rewritten.query() instanceof ExactKnnQueryBuilder);
}
public void testIgnoreUnmapped() throws IOException {
final NestedQueryBuilder queryBuilder = new NestedQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None);
queryBuilder.ignoreUnmapped(true);
Query query = queryBuilder.toQuery(createSearchExecutionContext());
assertThat(query, notNullValue());
assertThat(query, instanceOf(MatchNoDocsQuery.class));
final NestedQueryBuilder failingQueryBuilder = new NestedQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None);
failingQueryBuilder.ignoreUnmapped(false);
QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createSearchExecutionContext()));
assertThat(e.getMessage(), containsString("[" + NestedQueryBuilder.NAME + "] failed to find nested object under path [unmapped]"));
}
public void testIgnoreUnmappedWithRewrite() throws IOException {
// WrapperQueryBuilder makes sure we always rewrite
final NestedQueryBuilder queryBuilder = new NestedQueryBuilder(
"unmapped",
new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()),
ScoreMode.None
);
queryBuilder.ignoreUnmapped(true);
SearchExecutionContext searchExecutionContext = createSearchExecutionContext();
Query query = queryBuilder.rewrite(searchExecutionContext).toQuery(searchExecutionContext);
assertThat(query, notNullValue());
assertThat(query, instanceOf(MatchNoDocsQuery.class));
}
public void testMinFromString() {
assertThat("fromString(min) != MIN", ScoreMode.Min, equalTo(NestedQueryBuilder.parseScoreMode("min")));
assertThat("min", equalTo(NestedQueryBuilder.scoreModeAsString(ScoreMode.Min)));
}
public void testMaxFromString() {
assertThat("fromString(max) != MAX", ScoreMode.Max, equalTo(NestedQueryBuilder.parseScoreMode("max")));
assertThat("max", equalTo(NestedQueryBuilder.scoreModeAsString(ScoreMode.Max)));
}
public void testAvgFromString() {
assertThat("fromString(avg) != AVG", ScoreMode.Avg, equalTo(NestedQueryBuilder.parseScoreMode("avg")));
assertThat("avg", equalTo(NestedQueryBuilder.scoreModeAsString(ScoreMode.Avg)));
}
public void testSumFromString() {
assertThat("fromString(total) != SUM", ScoreMode.Total, equalTo(NestedQueryBuilder.parseScoreMode("sum")));
assertThat("sum", equalTo(NestedQueryBuilder.scoreModeAsString(ScoreMode.Total)));
}
public void testNoneFromString() {
assertThat("fromString(none) != NONE", ScoreMode.None, equalTo(NestedQueryBuilder.parseScoreMode("none")));
assertThat("none", equalTo(NestedQueryBuilder.scoreModeAsString(ScoreMode.None)));
}
/**
* Should throw {@link IllegalArgumentException} instead of NPE.
*/
public void testThatNullFromStringThrowsException() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> NestedQueryBuilder.parseScoreMode(null));
assertEquals("No score mode for child query [null] found", e.getMessage());
}
/**
* Failure should not change (and the value should never match anything...).
*/
public void testThatUnrecognizedFromStringThrowsException() {
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> NestedQueryBuilder.parseScoreMode("unrecognized value")
);
assertEquals("No score mode for child query [unrecognized value] found", e.getMessage());
}
public void testInlineLeafInnerHitsNestedQuery() throws Exception {
InnerHitBuilder leafInnerHits = randomNestedInnerHits();
NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None);
nestedQueryBuilder.innerHit(leafInnerHits);
Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
nestedQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.get(leafInnerHits.getName()), Matchers.notNullValue());
}
public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() {
InnerHitBuilder leafInnerHits = randomNestedInnerHits();
NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).innerHit(
leafInnerHits
);
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().should(nestedQueryBuilder);
Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
boolQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.get(leafInnerHits.getName()), Matchers.notNullValue());
}
public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() {
InnerHitBuilder leafInnerHits = randomNestedInnerHits();
NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).innerHit(
leafInnerHits
);
ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(nestedQueryBuilder);
Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
constantScoreQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.get(leafInnerHits.getName()), Matchers.notNullValue());
}
public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() {
InnerHitBuilder leafInnerHits1 = randomNestedInnerHits();
NestedQueryBuilder nestedQueryBuilder1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).innerHit(
leafInnerHits1
);
InnerHitBuilder leafInnerHits2 = randomNestedInnerHits();
NestedQueryBuilder nestedQueryBuilder2 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).innerHit(
leafInnerHits2
);
BoostingQueryBuilder constantScoreQueryBuilder = new BoostingQueryBuilder(nestedQueryBuilder1, nestedQueryBuilder2);
Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
constantScoreQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.get(leafInnerHits1.getName()), Matchers.notNullValue());
assertThat(innerHitBuilders.get(leafInnerHits2.getName()), Matchers.notNullValue());
}
public void testInlineLeafInnerHitsNestedQueryViaFunctionScoreQuery() {
InnerHitBuilder leafInnerHits = randomNestedInnerHits();
NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).innerHit(
leafInnerHits
);
FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(nestedQueryBuilder);
Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
((AbstractQueryBuilder<?>) functionScoreQueryBuilder).extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.get(leafInnerHits.getName()), Matchers.notNullValue());
}
public void testBuildIgnoreUnmappedNestQuery() throws Exception {
SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
IndexSettings settings = new IndexSettings(newIndexMeta("index", Settings.EMPTY), Settings.EMPTY);
when(searchExecutionContext.getIndexSettings()).thenReturn(settings);
when(searchExecutionContext.nestedLookup()).thenReturn(NestedLookup.EMPTY);
SearchContext searchContext = mock(SearchContext.class);
when(searchContext.getSearchExecutionContext()).thenReturn(searchExecutionContext);
InnerHitBuilder leafInnerHits = randomNestedInnerHits();
NestedQueryBuilder query1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None);
query1.innerHit(leafInnerHits);
final Map<String, InnerHitContextBuilder> innerHitBuilders = new HashMap<>();
final InnerHitsContext innerHitsContext = new InnerHitsContext();
expectThrows(IllegalStateException.class, () -> {
query1.extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.size(), Matchers.equalTo(1));
assertTrue(innerHitBuilders.containsKey(leafInnerHits.getName()));
innerHitBuilders.get(leafInnerHits.getName()).build(searchContext, innerHitsContext);
});
innerHitBuilders.clear();
NestedQueryBuilder query2 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None);
query2.ignoreUnmapped(true);
query2.innerHit(leafInnerHits);
query2.extractInnerHitBuilders(innerHitBuilders);
assertThat(innerHitBuilders.size(), Matchers.equalTo(1));
assertTrue(innerHitBuilders.containsKey(leafInnerHits.getName()));
assertThat(innerHitBuilders.get(leafInnerHits.getName()), instanceOf(NestedQueryBuilder.NestedInnerHitContextBuilder.class));
NestedQueryBuilder.NestedInnerHitContextBuilder nestedContextBuilder =
(NestedQueryBuilder.NestedInnerHitContextBuilder) innerHitBuilders.get(leafInnerHits.getName());
nestedContextBuilder.build(searchContext, innerHitsContext);
assertThat(innerHitsContext.getInnerHits().size(), Matchers.equalTo(0));
}
public void testExtractInnerHitBuildersWithDuplicate() {
final NestedQueryBuilder queryBuilder = new NestedQueryBuilder(
"path",
new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()),
ScoreMode.None
);
queryBuilder.innerHit(new InnerHitBuilder("some_name"));
IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> InnerHitContextBuilder.extractInnerHits(queryBuilder, Collections.singletonMap("some_name", null))
);
assertEquals("[inner_hits] already contains an entry for key [some_name]", e.getMessage());
}
public void testDisallowExpensiveQueries() {
SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
when(searchExecutionContext.allowExpensiveQueries()).thenReturn(false);
NestedQueryBuilder queryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None);
ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> queryBuilder.toQuery(searchExecutionContext));
assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage());
}
}
| NestedQueryBuilderTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/android/RectIntersectReturnValueIgnoredTest.java | {
"start": 1263,
"end": 1962
} | class ____ {
public boolean intersect(int x, int y, int x2, int y2) {
return false;
}
public boolean intersect(Rect other) {
return false;
}
public void setEmpty() {}
}\
""")
.setArgs(ImmutableList.of("-XDandroidCompatible=true"));
@Test
public void positiveCases() {
compilationHelper
.addSourceLines(
"RectIntersectReturnValueIgnoredPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.android.testdata;
import android.graphics.Rect;
/**
* @author avenet@google.com (Arnaud J. Venet)
*/
public | Rect |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/aggregate/MySQLAggregateSupport.java | {
"start": 2120,
"end": 9514
} | class ____ extends AggregateSupportImpl {
private static final AggregateSupport JSON_INSTANCE = new MySQLAggregateSupport( true, false );
private static final AggregateSupport JSON_WITH_UUID_INSTANCE = new MySQLAggregateSupport( true, true );
private static final AggregateSupport LONGTEXT_INSTANCE = new MySQLAggregateSupport( false, false );
private final boolean jsonType;
private final boolean uuidFunctions;
private MySQLAggregateSupport(boolean jsonType, boolean uuidFunctions) {
this.jsonType = jsonType;
this.uuidFunctions = uuidFunctions;
}
public static AggregateSupport forMySQL(Dialect dialect) {
return dialect.getVersion().isSameOrAfter( 8 )
? JSON_WITH_UUID_INSTANCE
: dialect.getVersion().isSameOrAfter( 5, 7 )
? JSON_INSTANCE
: AggregateSupportImpl.INSTANCE;
}
public static AggregateSupport forTiDB(Dialect dialect) {
return JSON_WITH_UUID_INSTANCE;
}
public static AggregateSupport forMariaDB(Dialect dialect) {
return LONGTEXT_INSTANCE;
}
@Override
public String aggregateComponentCustomReadExpression(
String template,
String placeholder,
String aggregateParentReadExpression,
String columnExpression,
int aggregateColumnTypeCode,
SqlTypedMapping column,
TypeConfiguration typeConfiguration) {
switch ( aggregateColumnTypeCode ) {
case JSON_ARRAY:
case JSON:
switch ( column.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode() ) {
case JSON:
case JSON_ARRAY:
return template.replace(
placeholder,
queryExpression( aggregateParentReadExpression, columnExpression )
);
case BOOLEAN:
return template.replace(
placeholder,
jsonType
? "case " + queryExpression( aggregateParentReadExpression, columnExpression ) + " when cast('true' as json) then true when cast('false' as json) then false end"
: "case " + queryExpression( aggregateParentReadExpression, columnExpression ) + " when 'true' then true when 'false' then false end"
);
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
// We encode binary data as hex, so we have to decode here
return template.replace(
placeholder,
"unhex(json_unquote(" + queryExpression( aggregateParentReadExpression, columnExpression ) + "))"
);
case UUID:
if ( column.getJdbcMapping().getJdbcType().isBinary() ) {
if ( uuidFunctions ) {
return template.replace(
placeholder,
"uuid_to_bin(json_unquote(" + queryExpression( aggregateParentReadExpression, columnExpression ) + "))"
);
}
else {
return template.replace(
placeholder,
"unhex(replace(json_unquote(" + queryExpression( aggregateParentReadExpression,
columnExpression ) + "),'-',''))"
);
}
}
// Fall-through intended
default:
return template.replace(
placeholder,
valueExpression( aggregateParentReadExpression, columnExpression, columnCastType( column ) )
);
}
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateColumnTypeCode );
}
private String columnCastType(SqlTypedMapping column) {
return switch (column.getJdbcMapping().getJdbcType().getDdlTypeCode()) {
// special case for casting to Boolean
case BOOLEAN, BIT -> "unsigned";
// MySQL doesn't let you cast to INTEGER/BIGINT/TINYINT
case TINYINT, SMALLINT, INTEGER, BIGINT -> "signed";
case REAL -> "float";
case DOUBLE -> "double";
case FLOAT -> jsonType
// In newer versions of MySQL, casting to float/double is supported
? column.getColumnDefinition()
: column.getPrecision() == null || column.getPrecision() == 53 ? "double" : "float";
// MySQL doesn't let you cast to TEXT/LONGTEXT
case CHAR, VARCHAR, LONG32VARCHAR, CLOB, ENUM -> "char";
case NCHAR, NVARCHAR, LONG32NVARCHAR, NCLOB -> "char character set utf8mb4";
// MySQL doesn't let you cast to BLOB/TINYBLOB/LONGBLOB
case BINARY, VARBINARY, LONG32VARBINARY, BLOB -> "binary";
default -> column.getColumnDefinition();
};
}
private String valueExpression(String aggregateParentReadExpression, String columnExpression, String columnType) {
return "cast(json_unquote(" + queryExpression( aggregateParentReadExpression, columnExpression ) + ") as " + columnType + ')';
}
private String queryExpression(String aggregateParentReadExpression, String columnExpression) {
if ( jsonType ) {
return "nullif(json_extract(" + aggregateParentReadExpression + ",'$." + columnExpression + "'),cast('null' as json))";
}
else {
return "nullif(json_extract(" + aggregateParentReadExpression + ",'$." + columnExpression + "'),'null')";
}
}
private String jsonCustomWriteExpression(String customWriteExpression, JdbcMapping jdbcMapping) {
final int sqlTypeCode = jdbcMapping.getJdbcType().getDefaultSqlTypeCode();
switch ( sqlTypeCode ) {
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
case BLOB:
// We encode binary data as hex
return "hex(" + customWriteExpression + ")";
case BOOLEAN:
return "(" + customWriteExpression + ")=true";
case TIMESTAMP:
return "date_format(" + customWriteExpression + ",'%Y-%m-%dT%T.%f')";
case TIMESTAMP_UTC:
return "date_format(" + customWriteExpression + ",'%Y-%m-%dT%T.%fZ')";
case UUID:
if ( jdbcMapping.getJdbcType().isBinary() ) {
if ( uuidFunctions ) {
return "bin_to_uuid(" + customWriteExpression + ")";
}
else if ( jsonType ) {
return "insert(insert(insert(insert(lower(hex(" + customWriteExpression + ")),21,0,'-'),17,0,'-'),13,0,'-'),9,0,'-')";
}
else {
return "regexp_replace(lower(hex(" + customWriteExpression + ")),'^(.{8})(.{4})(.{4})(.{4})(.{12})$','\\\\1-\\\\2-\\\\3-\\\\4-\\\\5')";
}
}
// Fall-through intended
default:
return customWriteExpression;
}
}
@Override
public String aggregateComponentAssignmentExpression(
String aggregateParentAssignmentExpression,
String columnExpression,
int aggregateColumnTypeCode,
Column column) {
switch ( aggregateColumnTypeCode ) {
case JSON:
case JSON_ARRAY:
// For JSON we always have to replace the whole object
return aggregateParentAssignmentExpression;
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateColumnTypeCode );
}
@Override
public boolean requiresAggregateCustomWriteExpressionRenderer(int aggregateSqlTypeCode) {
switch ( aggregateSqlTypeCode ) {
case JSON:
return true;
}
return false;
}
@Override
public WriteExpressionRenderer aggregateCustomWriteExpressionRenderer(
SelectableMapping aggregateColumn,
SelectableMapping[] columnsToUpdate,
TypeConfiguration typeConfiguration) {
final int aggregateSqlTypeCode = aggregateColumn.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode();
switch ( aggregateSqlTypeCode ) {
case JSON:
return jsonAggregateColumnWriter( aggregateColumn, columnsToUpdate );
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateSqlTypeCode );
}
private WriteExpressionRenderer jsonAggregateColumnWriter(
SelectableMapping aggregateColumn,
SelectableMapping[] columns) {
return new RootJsonWriteExpression( aggregateColumn, columns );
}
| MySQLAggregateSupport |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/hbm/index/PersonGroup.java | {
"start": 241,
"end": 972
} | class ____ {
private long id;
private String name;
private Set<Person> persons = new HashSet<Person>();
private Map<String, String> comments = new HashMap<String,String>();
public PersonGroup(String name) {
this.name = name;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Set<Person> getPersons() {
return persons;
}
public void setPersons(Set<Person> persons) {
this.persons = persons;
}
public Map<String, String> getComments() {
return comments;
}
public void setComments(Map<String, String> comments) {
this.comments = comments;
}
}
| PersonGroup |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4070WhitespaceTrimmingTest.java | {
"start": 1155,
"end": 2208
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that whitespace around artifact coordinates does not change artifact identity.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4070");
Verifier verifier = newVerifier(new File(testDir, "sub").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng4070");
verifier.filterFile("settings-template.xml", "settings.xml");
verifier.addCliArgument("--settings");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> artifacts = verifier.loadLines("target/artifacts.txt");
assertEquals(Collections.singletonList("org.apache.maven.its.mng4070:a:jar:0.1"), artifacts);
}
}
| MavenITmng4070WhitespaceTrimmingTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/dagger/PrivateConstructorForNoninstantiableModuleTest.java | {
"start": 2744,
"end": 3414
} | class ____ {
@Provides
static String provideString() {
return "";
}
@Provides
static Integer provideInteger() {
return 1;
}
private TestModule() {}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void abstractClassWithStaticAndAbstractMethods() {
testHelper
.addInputLines(
"in/TestModule.java",
"""
import dagger.Binds;
import dagger.Module;
import dagger.Provides;
@Module
abstract | TestModule |
java | quarkusio__quarkus | extensions/smallrye-fault-tolerance/deployment/src/test/java/io/quarkus/smallrye/faulttolerance/test/circuitbreaker/maintenance/duplicate/CircuitBreakerService2.java | {
"start": 262,
"end": 402
} | class ____ {
@CircuitBreaker
@CircuitBreakerName("hello")
public String hello() {
return "2";
}
}
| CircuitBreakerService2 |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java | {
"start": 2738,
"end": 4171
} | class ____ extends ESTestCase {
private IndexAnalyzers indexAnalyzers;
private IndexSettings indexSettings;
private AnalysisRegistry registry;
private int maxTokenCount;
private int idxMaxTokenCount;
@Override
public void setUp() throws Exception {
super.setUp();
Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard")
.put("index.analysis.analyzer.custom_analyzer.filter", "mock")
.put("index.analysis.normalizer.my_normalizer.type", "custom")
.put("index.analysis.char_filter.my_append.type", "append")
.put("index.analysis.char_filter.my_append.suffix", "baz")
.put("index.analyze.max_token_count", 100)
.putList("index.analysis.normalizer.my_normalizer.filter", "lowercase")
.build();
this.indexSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
Environment environment = TestEnvironment.newEnvironment(settings);
AnalysisPlugin plugin = new AnalysisPlugin() {
| TransportAnalyzeActionTests |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/network/ConnectionDisconnectListener.java | {
"start": 1031,
"end": 1564
} | interface ____ {
/**
* Invoked when a connection is disconnected.
* <p>
* <em>Note</em>: The method is invoked when the connection to the client is closed hence the
* implementation of this method should not perform any blocking operations.
*
* @param connectionId The connection id as defined in {@link org.apache.kafka.common.requests.RequestContext}.
* This id is unique for each connection.
*/
void onDisconnect(String connectionId);
}
| ConnectionDisconnectListener |
java | apache__kafka | storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/serialization/RemoteLogSegmentMetadataUpdateTransform.java | {
"start": 1490,
"end": 3955
} | class ____ implements RemoteLogMetadataTransform<RemoteLogSegmentMetadataUpdate> {
public ApiMessageAndVersion toApiMessageAndVersion(RemoteLogSegmentMetadataUpdate segmentMetadataUpdate) {
RemoteLogSegmentMetadataUpdateRecord record = new RemoteLogSegmentMetadataUpdateRecord()
.setRemoteLogSegmentId(createRemoteLogSegmentIdEntry(segmentMetadataUpdate))
.setBrokerId(segmentMetadataUpdate.brokerId())
.setEventTimestampMs(segmentMetadataUpdate.eventTimestampMs())
.setRemoteLogSegmentState(segmentMetadataUpdate.state().id());
segmentMetadataUpdate.customMetadata().ifPresent(md -> record.setCustomMetadata(md.value()));
return new ApiMessageAndVersion(record, record.highestSupportedVersion());
}
public RemoteLogSegmentMetadataUpdate fromApiMessageAndVersion(ApiMessageAndVersion apiMessageAndVersion) {
RemoteLogSegmentMetadataUpdateRecord record = (RemoteLogSegmentMetadataUpdateRecord) apiMessageAndVersion.message();
RemoteLogSegmentMetadataUpdateRecord.RemoteLogSegmentIdEntry entry = record.remoteLogSegmentId();
TopicIdPartition topicIdPartition = new TopicIdPartition(entry.topicIdPartition().id(),
new TopicPartition(entry.topicIdPartition().name(), entry.topicIdPartition().partition()));
Optional<CustomMetadata> customMetadata = Optional.ofNullable(record.customMetadata()).map(CustomMetadata::new);
return new RemoteLogSegmentMetadataUpdate(new RemoteLogSegmentId(topicIdPartition, entry.id()),
record.eventTimestampMs(), customMetadata, RemoteLogSegmentState.forId(record.remoteLogSegmentState()), record.brokerId());
}
private RemoteLogSegmentMetadataUpdateRecord.RemoteLogSegmentIdEntry createRemoteLogSegmentIdEntry(RemoteLogSegmentMetadataUpdate data) {
return new RemoteLogSegmentMetadataUpdateRecord.RemoteLogSegmentIdEntry()
.setId(data.remoteLogSegmentId().id())
.setTopicIdPartition(
new RemoteLogSegmentMetadataUpdateRecord.TopicIdPartitionEntry()
.setName(data.remoteLogSegmentId().topicIdPartition().topic())
.setPartition(data.remoteLogSegmentId().topicIdPartition().partition())
.setId(data.remoteLogSegmentId().topicIdPartition().topicId()));
}
}
| RemoteLogSegmentMetadataUpdateTransform |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/maybe/MaybeConcatArray.java | {
"start": 1124,
"end": 1559
} | class ____<T> extends Flowable<T> {
final MaybeSource<? extends T>[] sources;
public MaybeConcatArray(MaybeSource<? extends T>[] sources) {
this.sources = sources;
}
@Override
protected void subscribeActual(Subscriber<? super T> s) {
ConcatMaybeObserver<T> parent = new ConcatMaybeObserver<>(s, sources);
s.onSubscribe(parent);
parent.drain();
}
static final | MaybeConcatArray |
java | google__gson | gson/src/test/java/com/google/gson/functional/CustomDeserializerTest.java | {
"start": 4186,
"end": 4310
} | class ____ {
static final String TYPE_ACCESS = "__type__";
}
@SuppressWarnings("ImmutableEnumChecker")
private | MyBase |
java | google__guava | android/guava/src/com/google/common/collect/Streams.java | {
"start": 31102,
"end": 31426
} | interface ____<R extends @Nullable Object> {
/** Applies this function to the given argument and its index within a stream. */
@ParametricNullness
R apply(long from, long index);
}
/**
* An analogue of {@link java.util.function.DoubleFunction} also accepting an index.
*
* <p>This | LongFunctionWithIndex |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/querycache/HqlQueryCachePutResultTransformerTest.java | {
"start": 209,
"end": 478
} | class ____ extends HqlQueryCacheIgnoreResultTransformerTest {
@Override
protected CacheMode getQueryCacheMode() {
return CacheMode.PUT;
}
@Override
protected boolean areDynamicNonLazyAssociationsChecked() {
return false;
}
}
| HqlQueryCachePutResultTransformerTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/observers/QueueDrainObserver.java | {
"start": 1219,
"end": 3980
} | class ____<T, U, V> extends QueueDrainSubscriberPad2 implements Observer<T>, ObservableQueueDrain<U, V> {
protected final Observer<? super V> downstream;
protected final SimplePlainQueue<U> queue;
protected volatile boolean cancelled;
protected volatile boolean done;
protected Throwable error;
public QueueDrainObserver(Observer<? super V> actual, SimplePlainQueue<U> queue) {
this.downstream = actual;
this.queue = queue;
}
@Override
public final boolean cancelled() {
return cancelled;
}
@Override
public final boolean done() {
return done;
}
@Override
public final boolean enter() {
return wip.getAndIncrement() == 0;
}
protected final void fastPathEmit(U value, boolean delayError, Disposable dispose) {
final Observer<? super V> observer = downstream;
final SimplePlainQueue<U> q = queue;
if (wip.get() == 0 && wip.compareAndSet(0, 1)) {
accept(observer, value);
if (leave(-1) == 0) {
return;
}
} else {
q.offer(value);
if (!enter()) {
return;
}
}
QueueDrainHelper.drainLoop(q, observer, delayError, dispose, this);
}
/**
* Makes sure the fast-path emits in order.
* @param value the value to emit or queue up
* @param delayError if true, errors are delayed until the source has terminated
* @param disposable the resource to dispose if the drain terminates
*/
protected final void fastPathOrderedEmit(U value, boolean delayError, Disposable disposable) {
final Observer<? super V> observer = downstream;
final SimplePlainQueue<U> q = queue;
if (wip.get() == 0 && wip.compareAndSet(0, 1)) {
if (q.isEmpty()) {
accept(observer, value);
if (leave(-1) == 0) {
return;
}
} else {
q.offer(value);
}
} else {
q.offer(value);
if (!enter()) {
return;
}
}
QueueDrainHelper.drainLoop(q, observer, delayError, disposable, this);
}
@Override
public final Throwable error() {
return error;
}
@Override
public final int leave(int m) {
return wip.addAndGet(m);
}
@Override
public void accept(Observer<? super V> a, U v) {
// ignored by default
}
}
// -------------------------------------------------------------------
// Padding superclasses
//-------------------------------------------------------------------
/** Pads the header away from other fields. */
| QueueDrainObserver |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/generated/sqldefault/ImmutableDefaultTest.java | {
"start": 863,
"end": 2051
} | class ____ {
@Test
public void test(SessionFactoryScope scope) {
BigDecimal unitPrice = new BigDecimal("12.99");
scope.inTransaction( session -> {
OrderLine entity = new OrderLine( unitPrice, 5 );
session.persist(entity);
session.flush();
assertEquals( "new", entity.status );
assertEquals( unitPrice, entity.unitPrice );
assertEquals( 5, entity.quantity );
} );
scope.inTransaction( session -> {
OrderLine entity = session.createQuery("from WithDefault", OrderLine.class ).getSingleResult();
assertEquals( unitPrice, entity.unitPrice );
assertEquals( 5, entity.quantity );
assertEquals( "new", entity.status );
entity.status = "old"; //should be ignored due to @Immutable
} );
scope.inTransaction( session -> {
OrderLine entity = session.createQuery("from WithDefault", OrderLine.class ).getSingleResult();
assertEquals( unitPrice, entity.unitPrice );
assertEquals( 5, entity.quantity );
assertEquals( "new", entity.status );
} );
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity(name="WithDefault")
public static | ImmutableDefaultTest |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/runtime/src/main/java/io/quarkus/resteasy/reactive/server/runtime/StandardSecurityCheckInterceptor.java | {
"start": 1241,
"end": 2600
} | class ____ {
public static final String STANDARD_SECURITY_CHECK_INTERCEPTOR = StandardSecurityCheckInterceptor.class.getName();
@Inject
AuthorizationController controller;
@AroundInvoke
public Object intercept(InvocationContext ic) throws Exception {
if (controller.isAuthorizationEnabled() && Arc.container() != null
&& Arc.container().requestContext().isActive()
&& CurrentRequestManager.get() != null
&& alreadyDoneByEagerSecurityHandler(
CurrentRequestManager.get().getProperty(STANDARD_SECURITY_CHECK_INTERCEPTOR), ic.getMethod())) {
ic.getContextData().put(SECURITY_HANDLER, EXECUTED);
}
return ic.proceed();
}
private boolean alreadyDoneByEagerSecurityHandler(Object methodWithFinishedChecks, Method method) {
// compare methods: EagerSecurityHandler only intercept endpoints, we still want SecurityHandler run for CDI beans
return methodWithFinishedChecks != null && MethodDescription.ofMethod(method).equals(methodWithFinishedChecks);
}
/**
* Prevent the SecurityHandler from performing {@link RolesAllowed} security checks
*/
@Interceptor
@RolesAllowed("")
@Priority(Interceptor.Priority.LIBRARY_BEFORE - 100)
public static final | StandardSecurityCheckInterceptor |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/util/TestUtil.java | {
"start": 7301,
"end": 7347
} | class ____ generated.
*
* @param clazz the | got |
java | netty__netty | codec-http3/src/main/java/io/netty/handler/codec/http3/QpackUtil.java | {
"start": 948,
"end": 6309
} | class ____ {
private static final QpackException PREFIXED_INTEGER_TOO_LONG =
QpackException.newStatic(QpackDecoder.class, "toIntOrThrow(...)",
"QPACK - invalid prefixed integer");
/**
* Encode integer according to
* <a href="https://tools.ietf.org/html/rfc7541#section-5.1">Section 5.1</a>.
*/
static void encodePrefixedInteger(ByteBuf out, byte mask, int prefixLength, long toEncode) {
checkInRange(toEncode, 0, MAX_UNSIGNED_INT, "toEncode");
int nbits = (1 << prefixLength) - 1;
if (toEncode < nbits) {
out.writeByte((byte) (mask | toEncode));
} else {
out.writeByte((byte) (mask | nbits));
long remainder = toEncode - nbits;
while (remainder > 128) {
byte next = (byte) ((remainder % 128) | 0x80);
out.writeByte(next);
remainder = remainder / 128;
}
out.writeByte((byte) remainder);
}
}
/**
* Decode the integer or return {@code -1} if not enough bytes are readable.
* This method increases the readerIndex when the integer could be decoded.
*
* @param in the input {@link ByteBuf}
* @param prefixLength the prefix length
* @return the integer or {@code -1} if not enough readable bytes are in the {@link ByteBuf).
*/
static int decodePrefixedIntegerAsInt(ByteBuf in, int prefixLength) throws QpackException {
return toIntOrThrow(decodePrefixedInteger(in, prefixLength));
}
/**
* Converts the passed {@code aLong} to an {@code int} if the value can fit an {@code int}, otherwise throws a
* {@link QpackException}.
*
* @param aLong to convert.
* @throws QpackException If the value does not fit an {@code int}.
*/
static int toIntOrThrow(long aLong) throws QpackException {
if ((int) aLong != aLong) {
throw PREFIXED_INTEGER_TOO_LONG;
}
return (int) aLong;
}
/**
* Decode the integer or return {@code -1} if not enough bytes are readable.
* This method increases the readerIndex when the integer could be decoded.
*
* @param in the input {@link ByteBuf}
* @param prefixLength the prefix length
* @return the integer or {@code -1} if not enough readable bytes are in the {@link ByteBuf).
*/
static long decodePrefixedInteger(ByteBuf in, int prefixLength) {
int readerIndex = in.readerIndex();
int writerIndex = in.writerIndex();
if (readerIndex == writerIndex) {
return -1;
}
int nbits = (1 << prefixLength) - 1;
int first = in.readByte() & nbits;
if (first < nbits) {
return first;
}
int idx = readerIndex + 1;
long i = first;
int factor = 0;
byte next;
do {
if (idx == writerIndex) {
in.readerIndex(readerIndex);
return -1;
}
next = in.getByte(idx++);
i += (next & 0x7fL) << factor;
factor += 7;
} while ((next & 0x80) == 0x80);
in.readerIndex(idx);
return i;
}
static boolean firstByteEquals(ByteBuf in, byte mask) {
return (in.getByte(in.readerIndex()) & mask) == mask;
}
/**
* Compare two {@link CharSequence} objects without leaking timing information.
* <p>
* The {@code int} return type is intentional and is designed to allow cascading of constant time operations:
* <pre>
* String s1 = "foo";
* String s2 = "foo";
* String s3 = "foo";
* String s4 = "goo";
* boolean equals = (equalsConstantTime(s1, s2) & equalsConstantTime(s3, s4)) != 0;
* </pre>
* @param s1 the first value.
* @param s2 the second value.
* @return {@code 0} if not equal. {@code 1} if equal.
*/
static int equalsConstantTime(CharSequence s1, CharSequence s2) {
if (s1 instanceof AsciiString && s2 instanceof AsciiString) {
if (s1.length() != s2.length()) {
return 0;
}
AsciiString s1Ascii = (AsciiString) s1;
AsciiString s2Ascii = (AsciiString) s2;
return PlatformDependent.equalsConstantTime(s1Ascii.array(), s1Ascii.arrayOffset(),
s2Ascii.array(), s2Ascii.arrayOffset(), s1.length());
}
return ConstantTimeUtils.equalsConstantTime(s1, s2);
}
/**
* Compare two {@link CharSequence}s.
* @param s1 the first value.
* @param s2 the second value.
* @return {@code false} if not equal. {@code true} if equal.
*/
static boolean equalsVariableTime(CharSequence s1, CharSequence s2) {
return AsciiString.contentEquals(s1, s2);
}
/**
* Calculate the MaxEntries based on
* <a href="https://www.rfc-editor.org/rfc/rfc9204.html#section-4.5.1.1">RFC9204 Section 4.5.1.1</a>.
*
* @param maxTableCapacity the maximum table capacity.
* @return maxEntries.
*/
static long maxEntries(long maxTableCapacity) {
// MaxEntries = floor( MaxTableCapacity / 32 )
return floorDiv(maxTableCapacity, 32);
}
// Section 6.2. Literal Header Field Representation
| QpackUtil |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/generics/GenericManyToOneParameterTest.java | {
"start": 3088,
"end": 3200
} | interface ____ extends Site {
String getName();
}
@Entity( name = "SiteImpl" )
public static abstract | UserSite |
java | grpc__grpc-java | netty/src/main/java/io/grpc/netty/NettyServerHandler.java | {
"start": 48501,
"end": 49170
} | class ____ extends ChannelLogger {
private static final Logger log = Logger.getLogger(ChannelLogger.class.getName());
@Override
public void log(ChannelLogLevel level, String message) {
log.log(toJavaLogLevel(level), message);
}
@Override
public void log(ChannelLogLevel level, String messageFormat, Object... args) {
log(level, MessageFormat.format(messageFormat, args));
}
}
private static Level toJavaLogLevel(ChannelLogLevel level) {
switch (level) {
case ERROR:
return Level.FINE;
case WARNING:
return Level.FINER;
default:
return Level.FINEST;
}
}
}
| ServerChannelLogger |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java | {
"start": 14160,
"end": 32120
} | class ____ for threads that may dynamically
* load user code.
*/
private UserCodeClassLoader userCodeClassLoader;
/**
* <b>IMPORTANT:</b> This constructor may not start any work that would need to be undone in the
* case of a failing task deployment.
*/
public Task(
JobInformation jobInformation,
TaskInformation taskInformation,
ExecutionAttemptID executionAttemptID,
AllocationID slotAllocationId,
List<ResultPartitionDeploymentDescriptor> resultPartitionDeploymentDescriptors,
List<InputGateDeploymentDescriptor> inputGateDeploymentDescriptors,
MemoryManager memManager,
SharedResources sharedResources,
IOManager ioManager,
ShuffleEnvironment<?, ?> shuffleEnvironment,
KvStateService kvStateService,
BroadcastVariableManager bcVarManager,
TaskEventDispatcher taskEventDispatcher,
ExternalResourceInfoProvider externalResourceInfoProvider,
TaskStateManager taskStateManager,
TaskManagerActions taskManagerActions,
InputSplitProvider inputSplitProvider,
CheckpointResponder checkpointResponder,
TaskOperatorEventGateway operatorCoordinatorEventGateway,
GlobalAggregateManager aggregateManager,
LibraryCacheManager.ClassLoaderHandle classLoaderHandle,
FileCache fileCache,
TaskManagerRuntimeInfo taskManagerConfig,
@Nonnull TaskMetricGroup metricGroup,
PartitionProducerStateChecker partitionProducerStateChecker,
Executor executor,
ChannelStateWriteRequestExecutorFactory channelStateExecutorFactory) {
Preconditions.checkNotNull(jobInformation);
Preconditions.checkNotNull(taskInformation);
this.jobInfo = new JobInfoImpl(jobInformation.getJobId(), jobInformation.getJobName());
this.taskInfo =
new TaskInfoImpl(
taskInformation.getTaskName(),
taskInformation.getMaxNumberOfSubtasks(),
executionAttemptID.getSubtaskIndex(),
taskInformation.getNumberOfSubtasks(),
executionAttemptID.getAttemptNumber(),
String.valueOf(slotAllocationId));
this.jobId = jobInformation.getJobId();
this.jobType = jobInformation.getJobType();
this.vertexId = taskInformation.getJobVertexId();
this.executionId = Preconditions.checkNotNull(executionAttemptID);
this.allocationId = Preconditions.checkNotNull(slotAllocationId);
this.taskNameWithSubtask = taskInfo.getTaskNameWithSubtasks();
this.jobConfiguration = jobInformation.getJobConfiguration();
this.taskConfiguration = taskInformation.getTaskConfiguration();
this.requiredJarFiles = jobInformation.getRequiredJarFileBlobKeys();
this.requiredClasspaths = jobInformation.getRequiredClasspathURLs();
this.nameOfInvokableClass = taskInformation.getInvokableClassName();
this.serializedExecutionConfig = jobInformation.getSerializedExecutionConfig();
Configuration tmConfig = taskManagerConfig.getConfiguration();
this.taskCancellationInterval =
tmConfig.get(TaskManagerOptions.TASK_CANCELLATION_INTERVAL).toMillis();
this.taskCancellationTimeout =
tmConfig.get(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT).toMillis();
this.memoryManager = Preconditions.checkNotNull(memManager);
this.sharedResources = Preconditions.checkNotNull(sharedResources);
this.ioManager = Preconditions.checkNotNull(ioManager);
this.broadcastVariableManager = Preconditions.checkNotNull(bcVarManager);
this.taskEventDispatcher = Preconditions.checkNotNull(taskEventDispatcher);
this.taskStateManager = Preconditions.checkNotNull(taskStateManager);
this.accumulatorRegistry = new AccumulatorRegistry(jobId, executionId);
this.inputSplitProvider = Preconditions.checkNotNull(inputSplitProvider);
this.checkpointResponder = Preconditions.checkNotNull(checkpointResponder);
this.operatorCoordinatorEventGateway =
Preconditions.checkNotNull(operatorCoordinatorEventGateway);
this.aggregateManager = Preconditions.checkNotNull(aggregateManager);
this.taskManagerActions = checkNotNull(taskManagerActions);
this.externalResourceInfoProvider = checkNotNull(externalResourceInfoProvider);
this.classLoaderHandle = Preconditions.checkNotNull(classLoaderHandle);
this.fileCache = Preconditions.checkNotNull(fileCache);
this.kvStateService = Preconditions.checkNotNull(kvStateService);
this.taskManagerConfig = Preconditions.checkNotNull(taskManagerConfig);
this.metrics = metricGroup;
this.partitionProducerStateChecker =
Preconditions.checkNotNull(partitionProducerStateChecker);
this.executor = Preconditions.checkNotNull(executor);
this.channelStateExecutorFactory = channelStateExecutorFactory;
// create the reader and writer structures
final String taskNameWithSubtaskAndId = taskNameWithSubtask + " (" + executionId + ')';
final ShuffleIOOwnerContext taskShuffleContext =
shuffleEnvironment.createShuffleIOOwnerContext(
taskNameWithSubtaskAndId, executionId, metrics.getIOMetricGroup());
// produced intermediate result partitions
final ResultPartitionWriter[] resultPartitionWriters =
shuffleEnvironment
.createResultPartitionWriters(
taskShuffleContext, resultPartitionDeploymentDescriptors)
.toArray(new ResultPartitionWriter[] {});
this.partitionWriters = resultPartitionWriters;
// consumed intermediate result partitions
final IndexedInputGate[] gates =
shuffleEnvironment
.createInputGates(taskShuffleContext, this, inputGateDeploymentDescriptors)
.toArray(new IndexedInputGate[0]);
this.inputGates = new IndexedInputGate[gates.length];
int counter = 0;
for (IndexedInputGate gate : gates) {
inputGates[counter++] =
new InputGateWithMetrics(
gate, metrics.getIOMetricGroup().getNumBytesInCounter());
}
invokableHasBeenCanceled = new AtomicBoolean(false);
// finally, create the executing thread, but do not start it
executingThread = new Thread(TASK_THREADS_GROUP, this, taskNameWithSubtask);
}
// ------------------------------------------------------------------------
// Accessors
// ------------------------------------------------------------------------
@Override
public JobID getJobID() {
return jobId;
}
public JobVertexID getJobVertexId() {
return vertexId;
}
@Override
public ExecutionAttemptID getExecutionId() {
return executionId;
}
@Override
public AllocationID getAllocationId() {
return allocationId;
}
public TaskInfo getTaskInfo() {
return taskInfo;
}
public Configuration getJobConfiguration() {
return jobConfiguration;
}
public Configuration getTaskConfiguration() {
return this.taskConfiguration;
}
public AccumulatorRegistry getAccumulatorRegistry() {
return accumulatorRegistry;
}
public TaskMetricGroup getMetricGroup() {
return metrics;
}
public Thread getExecutingThread() {
return executingThread;
}
@Override
public CompletableFuture<ExecutionState> getTerminationFuture() {
return terminationFuture;
}
@VisibleForTesting
long getTaskCancellationInterval() {
return taskCancellationInterval;
}
@VisibleForTesting
long getTaskCancellationTimeout() {
return taskCancellationTimeout;
}
@Nullable
@VisibleForTesting
TaskInvokable getInvokable() {
return invokable;
}
public boolean isBackPressured() {
if (invokable == null
|| partitionWriters.length == 0
|| (executionState != ExecutionState.INITIALIZING
&& executionState != ExecutionState.RUNNING)) {
return false;
}
for (int i = 0; i < partitionWriters.length; ++i) {
if (!partitionWriters[i].isAvailable()) {
return true;
}
}
return false;
}
// ------------------------------------------------------------------------
// Task Execution
// ------------------------------------------------------------------------
/**
* Returns the current execution state of the task.
*
* @return The current execution state of the task.
*/
public ExecutionState getExecutionState() {
return this.executionState;
}
/**
* Checks whether the task has failed, is canceled, or is being canceled at the moment.
*
* @return True is the task in state FAILED, CANCELING, or CANCELED, false otherwise.
*/
public boolean isCanceledOrFailed() {
return executionState == ExecutionState.CANCELING
|| executionState == ExecutionState.CANCELED
|| executionState == ExecutionState.FAILED;
}
/**
* If the task has failed, this method gets the exception that caused this task to fail.
* Otherwise this method returns null.
*
* @return The exception that caused the task to fail, or null, if the task has not failed.
*/
public Throwable getFailureCause() {
return failureCause;
}
/** Starts the task's thread. */
public void startTaskThread() {
executingThread.start();
}
/** The core work method that bootstraps the task and executes its code. */
@Override
public void run() {
try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
doRun();
} finally {
terminationFuture.complete(executionState);
}
}
private void doRun() {
// ----------------------------
// Initial State transition
// ----------------------------
while (true) {
ExecutionState current = this.executionState;
if (current == ExecutionState.CREATED) {
if (transitionState(ExecutionState.CREATED, ExecutionState.DEPLOYING)) {
// success, we can start our work
break;
}
} else if (current == ExecutionState.FAILED) {
// we were immediately failed. tell the TaskManager that we reached our final state
notifyFinalState();
if (metrics != null) {
metrics.close();
}
return;
} else if (current == ExecutionState.CANCELING) {
if (transitionState(ExecutionState.CANCELING, ExecutionState.CANCELED)) {
// we were immediately canceled. tell the TaskManager that we reached our final
// state
notifyFinalState();
if (metrics != null) {
metrics.close();
}
return;
}
} else {
if (metrics != null) {
metrics.close();
}
throw new IllegalStateException(
"Invalid state for beginning of operation of task " + this + '.');
}
}
// all resource acquisitions and registrations from here on
// need to be undone in the end
Map<String, Future<Path>> distributedCacheEntries = new HashMap<>();
TaskInvokable invokable = null;
// Registry that can be used to execute actions after the task has already failed. These
// actions are fired in the registration order.
AutoCloseableRegistry postFailureCleanUpRegistry = new AutoCloseableRegistry(false);
try {
// ----------------------------
// Task Bootstrap - We periodically
// check for canceling as a shortcut
// ----------------------------
// activate safety net for task thread
LOG.debug("Creating FileSystem stream leak safety net for task {}", this);
FileSystemSafetyNet.initializeSafetyNetForThread();
// first of all, get a user-code classloader
// this may involve downloading the job's JAR files and/or classes
LOG.info("Loading JAR files for task {}.", this);
userCodeClassLoader = createUserCodeClassloader();
final ExecutionConfig executionConfig =
serializedExecutionConfig.deserializeValue(userCodeClassLoader.asClassLoader());
Configuration executionConfigConfiguration = executionConfig.toConfiguration();
// override task cancellation interval from Flink config if set in ExecutionConfig
taskCancellationInterval =
executionConfigConfiguration
.getOptional(TaskManagerOptions.TASK_CANCELLATION_INTERVAL)
.orElse(Duration.ofMillis(taskCancellationInterval))
.toMillis();
// override task cancellation timeout from Flink config if set in ExecutionConfig
taskCancellationTimeout =
executionConfigConfiguration
.getOptional(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT)
.orElse(Duration.ofMillis(taskCancellationTimeout))
.toMillis();
if (isCanceledOrFailed()) {
throw new CancelTaskException();
}
// ----------------------------------------------------------------
// register the task with the network stack
// this operation may fail if the system does not have enough
// memory to run the necessary data exchanges
// the registration must also strictly be undone
// ----------------------------------------------------------------
LOG.debug("Registering task at network: {}.", this);
setupPartitionsAndGates(partitionWriters, inputGates);
for (ResultPartitionWriter partitionWriter : partitionWriters) {
taskEventDispatcher.registerPartition(partitionWriter.getPartitionId());
}
// next, kick off the background copying of files for the distributed cache
try {
for (Map.Entry<String, DistributedCache.DistributedCacheEntry> entry :
DistributedCache.readFileInfoFromConfig(jobConfiguration)) {
LOG.info("Obtaining local cache file for '{}'.", entry.getKey());
Future<Path> cp =
fileCache.createTmpFile(
entry.getKey(), entry.getValue(), jobId, executionId);
distributedCacheEntries.put(entry.getKey(), cp);
}
} catch (Exception e) {
throw new Exception(
String.format(
"Exception while adding files to distributed cache of task %s (%s).",
taskNameWithSubtask, executionId),
e);
}
if (isCanceledOrFailed()) {
throw new CancelTaskException();
}
// ----------------------------------------------------------------
// call the user code initialization methods
// ----------------------------------------------------------------
TaskKvStateRegistry kvStateRegistry =
kvStateService.createKvStateTaskRegistry(jobId, getJobVertexId());
Environment env =
new RuntimeEnvironment(
jobId,
jobType,
vertexId,
executionId,
executionConfig,
jobInfo,
taskInfo,
jobConfiguration,
taskConfiguration,
userCodeClassLoader,
memoryManager,
sharedResources,
ioManager,
broadcastVariableManager,
taskStateManager,
aggregateManager,
accumulatorRegistry,
kvStateRegistry,
inputSplitProvider,
distributedCacheEntries,
partitionWriters,
inputGates,
taskEventDispatcher,
checkpointResponder,
operatorCoordinatorEventGateway,
taskManagerConfig,
metrics,
this,
externalResourceInfoProvider,
channelStateExecutorFactory,
taskManagerActions);
// Make sure the user code classloader is accessible thread-locally.
// We are setting the correct context | loader |
java | quarkusio__quarkus | extensions/devui/deployment/src/test/java/io/quarkus/devui/testrunner/CoupledService.java | {
"start": 45,
"end": 138
} | class ____ {
public static String service() {
return "unit";
}
}
| CoupledService |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/DefaultHttpContentProcessor.java | {
"start": 1044,
"end": 1201
} | class ____ handle subscribing to a stream of {@link io.netty.handler.codec.http.HttpContent}.
*
* @author Graeme Rocher
* @since 1.0
*/
@Internal
public | will |
java | apache__flink | flink-core/src/main/java/org/apache/flink/configuration/ConfigOptions.java | {
"start": 6225,
"end": 7585
} | class ____<T> {
private final String key;
private final Class<T> clazz;
TypedConfigOptionBuilder(String key, Class<T> clazz) {
this.key = key;
this.clazz = clazz;
}
/** Defines that the option's type should be a list of previously defined atomic type. */
public ListConfigOptionBuilder<T> asList() {
return new ListConfigOptionBuilder<>(key, clazz);
}
/**
* Creates a ConfigOption with the given default value.
*
* @param value The default value for the config option
* @return The config option with the default value.
*/
public ConfigOption<T> defaultValue(T value) {
return new ConfigOption<>(key, clazz, ConfigOption.EMPTY_DESCRIPTION, value, false);
}
/**
* Creates a ConfigOption without a default value.
*
* @return The config option without a default value.
*/
public ConfigOption<T> noDefaultValue() {
return new ConfigOption<>(
key, clazz, Description.builder().text("").build(), null, false);
}
}
/**
* Builder for {@link ConfigOption} of list of type {@link E}.
*
* @param <E> list element type of the option
*/
public static | TypedConfigOptionBuilder |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/samesite/SameSiteCookieTestCase.java | {
"start": 507,
"end": 2361
} | class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(SetCookieHandler.class)
.addAsResource(new StringAsset(
"quarkus.http.same-site-cookie.cookie1.value=Lax\n" +
"quarkus.http.same-site-cookie.cookie2.value=Lax\n" +
"quarkus.http.same-site-cookie.cookie2.case-sensitive=true\n" +
"quarkus.http.same-site-cookie.cookie3.value=None\n"),
"application.properties");
}
});
@Test
public void testSameSiteCookie() {
RestAssured.get("/cookie")
.then().cookies(new HashMap<>())
.cookie("cookie1", RestAssuredMatchers.detailedCookie().sameSite("Lax"))
.cookie("COOKIE2", RestAssuredMatchers.detailedCookie().sameSite(Matchers.nullValue()))
.cookie("cookie3", RestAssuredMatchers.detailedCookie().sameSite("None"))
.cookie("cookie3", RestAssuredMatchers.detailedCookie().secured(true));
RestAssured.with().header("user-agent", "Chromium/53 foo").get("/cookie")
.then().cookies(new HashMap<>())
.cookie("cookie1", RestAssuredMatchers.detailedCookie().sameSite("Lax"))
.cookie("COOKIE2", RestAssuredMatchers.detailedCookie().sameSite(Matchers.nullValue()))
.cookie("cookie3", RestAssuredMatchers.detailedCookie().sameSite(Matchers.nullValue()));
}
}
| SameSiteCookieTestCase |
java | quarkusio__quarkus | extensions/kubernetes/spi/src/main/java/io/quarkus/kubernetes/spi/KubernetesEnvBuildItem.java | {
"start": 263,
"end": 8943
} | enum ____ {
var(false),
field(false),
secret(true),
configmap(true),
keyFromConfigmap(false),
keyFromSecret(false);
public final boolean allowMultipleDefinitions;
EnvType(boolean allowMultipleDefinitions) {
this.allowMultipleDefinitions = allowMultipleDefinitions;
}
public boolean mightConflictWith(EnvType type) {
if (this == type) {
return true;
}
return switch (this) {
case field -> type == var || type == keyFromConfigmap || type == keyFromSecret;
case var -> type == field || type == keyFromConfigmap || type == keyFromSecret;
case secret -> type == configmap;
case configmap -> type == secret;
case keyFromConfigmap -> type == field || type == var || type == keyFromSecret;
case keyFromSecret -> type == field || type == var || type == keyFromConfigmap;
};
}
}
private final String name;
private final String value;
private final String configmap;
private final String secret;
private final String field;
private final EnvType type;
private final boolean oldStyle;
private final String prefix;
public static KubernetesEnvBuildItem createFromField(String name, String targetField, String target,
boolean... oldStyle) {
return create(name, null, null, null, targetField, target, null, isOldStyle(oldStyle));
}
public static KubernetesEnvBuildItem createFromConfigMap(String configMapName, String target, String prefix,
boolean... oldStyle) {
return create(configMapName, null, null, configMapName, null, target, prefix, isOldStyle(oldStyle));
}
public static KubernetesEnvBuildItem createFromSecret(String secretName, String target, String prefix,
boolean... oldStyle) {
return create(secretName, null, secretName, null, null, target, prefix, isOldStyle(oldStyle));
}
public static KubernetesEnvBuildItem createSimpleVar(String name, String value, String target,
boolean... oldStyle) {
return create(name, value, null, null, null, target, null, isOldStyle(oldStyle));
}
public static KubernetesEnvBuildItem createFromConfigMapKey(String varName, String key, String configmap, String target,
String prefix, boolean... oldStyle) {
return create(varName, key, null, configmap, null, target, prefix, isOldStyle(oldStyle));
}
@SuppressWarnings("unused")
public static KubernetesEnvBuildItem createFromSecretKey(String varName, String key, String secret, String target,
String prefix, boolean... oldStyle) {
return create(varName, key, secret, null, null, target, prefix, isOldStyle(oldStyle));
}
public static KubernetesEnvBuildItem createFromResourceKey(String varName, String key, String secret,
String configmap, String target, boolean... oldStyle) {
return create(varName, key, secret, configmap, null, target, null, isOldStyle(oldStyle));
}
public static KubernetesEnvBuildItem create(String name, String value, String secret, String configmap, String field,
String target, String prefix, boolean... oldStyle) throws IllegalArgumentException {
final boolean secretPresent = secret != null;
final boolean configmapPresent = configmap != null;
final boolean valuePresent = value != null;
final boolean fieldPresent = field != null;
if (valuePresent) {
if (secretPresent && configmapPresent) {
throw new IllegalArgumentException(String.format(
"'%s' env var can't simultaneously take its value from '%s' configmap & '%s' secret",
name, configmap, secret));
}
if (fieldPresent) {
throw new IllegalArgumentException(String.format(
"'%s' env var can't simultaneously have a '%s' value & take is value from the '%s' field",
name, value, field));
}
}
if (secretPresent && configmapPresent) {
log.warn(String.format("The '%s' name was used to try to import both from '%s' secret & '%s' configmap. " +
"Only values from '%s' secret will be imported.\nIf you want to import from both, use a " +
"different property name for either.",
name, secret,
configmap,
secret));
}
final EnvType type;
if (secretPresent) {
if (valuePresent) {
type = EnvType.keyFromSecret;
} else {
name = secret;
type = EnvType.secret;
}
} else if (configmapPresent) {
if (valuePresent) {
type = EnvType.keyFromConfigmap;
} else {
name = configmap;
type = EnvType.configmap;
}
} else if (field != null) {
type = EnvType.field;
} else {
type = EnvType.var;
}
return new KubernetesEnvBuildItem(name, value, configmap, secret, field, type, target, prefix, isOldStyle(oldStyle));
}
private static boolean isOldStyle(boolean[] oldStyle) {
return oldStyle.length >= 1 && oldStyle[0];
}
KubernetesEnvBuildItem(String name, String value, String configmap, String secret, String field, EnvType type,
String target, String prefix, boolean oldStyle) {
super(target);
this.name = name;
this.value = value;
this.configmap = configmap;
this.secret = secret;
this.field = field;
this.type = type;
this.prefix = prefix;
this.oldStyle = oldStyle;
}
public String getConfigMap() {
return configmap;
}
public String getSecret() {
return secret;
}
public String getField() {
return field;
}
public boolean isOldStyle() {
return oldStyle;
}
public String getName() {
return name;
}
public String getValue() {
return value;
}
public EnvType getType() {
return type;
}
public String getPrefix() {
return prefix;
}
@SuppressWarnings("unused")
public KubernetesEnvBuildItem newWithTarget(String newTarget) {
return new KubernetesEnvBuildItem(this.name, this.value, this.configmap, this.secret, this.field, this.type, newTarget,
this.prefix, this.oldStyle);
}
public String toString() {
return switch (type) {
case var -> String.format("'%s' env var with value '%s'", name, value);
case field -> String.format("'%s' env var with value from field '%s'", name, field);
case secret -> "all values from '" + secret + "' secret";
case configmap -> "all values from '" + configmap + "' configmap";
case keyFromConfigmap ->
String.format("'%s' env var with value from '%s' key of '%s' configmap", name, value, configmap);
case keyFromSecret ->
String.format("'%s' env var with value from '%s' key of '%s' secret", name, value, secret);
};
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
KubernetesEnvBuildItem that = (KubernetesEnvBuildItem) o;
if (!name.equals(that.name))
return false;
if (!Objects.equals(value, that.value))
return false;
if (!Objects.equals(configmap, that.configmap))
return false;
if (!Objects.equals(secret, that.secret))
return false;
if (!Objects.equals(field, that.field))
return false;
if (!Objects.equals(prefix, that.prefix))
return false;
return type == that.type;
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + (value != null ? value.hashCode() : 0);
result = 31 * result + (configmap != null ? configmap.hashCode() : 0);
result = 31 * result + (secret != null ? secret.hashCode() : 0);
result = 31 * result + (field != null ? field.hashCode() : 0);
result = 31 * result + type.hashCode();
result = 31 * result + (prefix != null ? prefix.hashCode() : 0);
return result;
}
}
| EnvType |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/RedundantNullCheckTest.java | {
"start": 7871,
"end": 8570
} | class ____ {
void process(Map<String, String> map, String key) {
String value = map.get(key);
if (value == null) {
/* This check should NOT be redundant */
}
}
}
""")
.doTest();
}
@Test
public void negative_variableInitializedFromAnnotatedLib_returnNullable_inNullMarkedScope() {
compilationHelper
.addSourceLines(
"AnnotatedLibNullable.java",
"""
package mylib;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
@NullMarked
public | Test |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/type/InheritedAnnotationsAnnotationMetadataTests.java | {
"start": 6643,
"end": 6733
} | interface ____ {
}
@InheritedComposedAnnotation
private static | InheritedComposedAnnotation |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/schedulers/SchedulerWhen.java | {
"start": 4467,
"end": 6599
} | class ____ extends Scheduler implements Disposable {
private final Scheduler actualScheduler;
private final FlowableProcessor<Flowable<Completable>> workerProcessor;
private Disposable disposable;
public SchedulerWhen(Function<Flowable<Flowable<Completable>>, Completable> combine, Scheduler actualScheduler) {
this.actualScheduler = actualScheduler;
// workers are converted into completables and put in this queue.
this.workerProcessor = UnicastProcessor.<Flowable<Completable>>create().toSerialized();
// send it to a custom combinator to pick the order and rate at which
// workers are processed.
try {
disposable = combine.apply(workerProcessor).subscribe();
} catch (Throwable e) {
throw ExceptionHelper.wrapOrThrow(e);
}
}
@Override
public void dispose() {
disposable.dispose();
}
@Override
public boolean isDisposed() {
return disposable.isDisposed();
}
@NonNull
@Override
public Worker createWorker() {
final Worker actualWorker = actualScheduler.createWorker();
// a queue for the actions submitted while worker is waiting to get to
// the subscribe to off the workerQueue.
final FlowableProcessor<ScheduledAction> actionProcessor = UnicastProcessor.<ScheduledAction>create().toSerialized();
// convert the work of scheduling all the actions into a completable
Flowable<Completable> actions = actionProcessor.map(new CreateWorkerFunction(actualWorker));
// a worker that queues the action to the actionQueue subject.
Worker worker = new QueueWorker(actionProcessor, actualWorker);
// enqueue the completable that process actions put in reply subject
workerProcessor.onNext(actions);
// return the worker that adds actions to the reply subject
return worker;
}
static final Disposable SUBSCRIBED = new SubscribedDisposable();
static final Disposable DISPOSED = Disposable.disposed();
@SuppressWarnings("serial")
abstract static | SchedulerWhen |
java | spring-projects__spring-boot | module/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/endpoint/web/WebEndpointResponse.java | {
"start": 1281,
"end": 4379
} | class ____<T> {
/**
* {@code 200 OK}.
*/
public static final int STATUS_OK = 200;
/**
* {@code 204 No Content}.
*/
public static final int STATUS_NO_CONTENT = 204;
/**
* {@code 400 Bad Request}.
*/
public static final int STATUS_BAD_REQUEST = 400;
/**
* {@code 404 Not Found}.
*/
public static final int STATUS_NOT_FOUND = 404;
/**
* {@code 429 Too Many Requests}.
*/
public static final int STATUS_TOO_MANY_REQUESTS = 429;
/**
* {@code 500 Internal Server Error}.
*/
public static final int STATUS_INTERNAL_SERVER_ERROR = 500;
/**
* {@code 503 Service Unavailable}.
*/
public static final int STATUS_SERVICE_UNAVAILABLE = 503;
private final @Nullable T body;
private final int status;
private final @Nullable MimeType contentType;
/**
* Creates a new {@code WebEndpointResponse} with no body and a 200 (OK) status.
*/
public WebEndpointResponse() {
this(null);
}
/**
* Creates a new {@code WebEndpointResponse} with no body and the given
* {@code status}.
* @param status the HTTP status
*/
public WebEndpointResponse(int status) {
this(null, status);
}
/**
* Creates a new {@code WebEndpointResponse} with the given body and a 200 (OK)
* status.
* @param body the body
*/
public WebEndpointResponse(@Nullable T body) {
this(body, STATUS_OK);
}
/**
* Creates a new {@code WebEndpointResponse} with the given body and content type and
* a 200 (OK) status.
* @param body the body
* @param producible the producible providing the content type
* @since 2.5.0
*/
public WebEndpointResponse(@Nullable T body, Producible<?> producible) {
this(body, STATUS_OK, producible.getProducedMimeType());
}
/**
* Creates a new {@code WebEndpointResponse} with the given body and content type and
* a 200 (OK) status.
* @param body the body
* @param contentType the content type of the response
* @since 2.5.0
*/
public WebEndpointResponse(@Nullable T body, MimeType contentType) {
this(body, STATUS_OK, contentType);
}
/**
* Creates a new {@code WebEndpointResponse} with the given body and status.
* @param body the body
* @param status the HTTP status
*/
public WebEndpointResponse(@Nullable T body, int status) {
this(body, status, null);
}
/**
* Creates a new {@code WebEndpointResponse} with the given body and status.
* @param body the body
* @param status the HTTP status
* @param contentType the content type of the response
* @since 2.5.0
*/
public WebEndpointResponse(@Nullable T body, int status, @Nullable MimeType contentType) {
this.body = body;
this.status = status;
this.contentType = contentType;
}
/**
* Returns the content type of the response.
* @return the content type;
*/
public @Nullable MimeType getContentType() {
return this.contentType;
}
/**
* Returns the body for the response.
* @return the body
*/
public @Nullable T getBody() {
return this.body;
}
/**
* Returns the status for the response.
* @return the status
*/
public int getStatus() {
return this.status;
}
}
| WebEndpointResponse |
java | spring-projects__spring-framework | spring-context/src/testFixtures/java/org/springframework/context/testfixture/jndi/SimpleNamingContext.java | {
"start": 11327,
"end": 11656
} | class ____ extends AbstractNamingEnumeration<Binding> {
private BindingEnumeration(SimpleNamingContext context, String root) throws NamingException {
super(context, root);
}
@Override
protected Binding createObject(String strippedName, Object obj) {
return new Binding(strippedName, obj);
}
}
}
| BindingEnumeration |
java | processing__processing4 | core/src/processing/core/PShape.java | {
"start": 2547,
"end": 2810
} | class ____ all).
* <li>a means of creating PShape objects ala beginShape() and endShape().
* <li>load(), update(), and cache methods ala PImage, so that shapes can have
* renderer-specific optimizations, such as vertex arrays in OpenGL.
* <li>splitting this | after |
java | google__guava | android/guava/src/com/google/common/reflect/ClassPath.java | {
"start": 12234,
"end": 12362
} | class ____ given in the source code.
*
* <p>Behaves similarly to {@link Class#getSimpleName()} but does not require the | as |
java | google__guice | core/src/com/google/inject/internal/PackageNameCompressor.java | {
"start": 2968,
"end": 3247
} | class ____.
+ "[A-Z][\\w$]*)");
// Pattern used to filter out quoted strings that should not have their package name compressed.
// Picked '"' here because Guice uses it when including a string literal in an error message. This
// will allow user to include | name |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/checkreturnvalue/ResultUseRule.java | {
"start": 2588,
"end": 3700
} | class ____<C, S, M extends S> extends ResultUseRule<C, S> {
private static final ImmutableSet<RuleScope> SCOPES = ImmutableSet.of(METHOD);
private final Class<M> methodSymbolClass;
protected MethodRule(Class<M> methodSymbolClass) {
this.methodSymbolClass = methodSymbolClass;
}
@Override
public final ImmutableSet<RuleScope> scopes() {
return SCOPES;
}
/**
* Evaluates the given {@code method} and optionally returns a {@link ResultUsePolicy} for it.
*/
public abstract Optional<ResultUsePolicy> evaluateMethod(M method, C context);
@Override
public final Optional<ResultUsePolicy> evaluate(S symbol, C context) {
return methodSymbolClass.isInstance(symbol)
? evaluateMethod(methodSymbolClass.cast(symbol), context)
: Optional.empty();
}
}
/**
* A rule that evaluates symbols of any kind to determine a {@link ResultUsePolicy} to associate
* with them.
*
* @param <C> the type of the context object used during evaluation
* @param <S> the type of symbols
*/
public abstract static | MethodRule |
java | apache__dubbo | dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/support/RegistryManager.java | {
"start": 1869,
"end": 6244
} | class ____ {
private static final ErrorTypeAwareLogger LOGGER = LoggerFactory.getErrorTypeAwareLogger(RegistryManager.class);
private ApplicationModel applicationModel;
/**
* Registry Collection Map<RegistryAddress, Registry>
*/
private final Map<String, Registry> registries = new ConcurrentHashMap<>();
/**
* The lock for the acquisition process of the registry
*/
protected final ReentrantLock lock = new ReentrantLock();
private final AtomicBoolean destroyed = new AtomicBoolean(false);
public RegistryManager(ApplicationModel applicationModel) {
this.applicationModel = applicationModel;
}
/**
* Get all registries
*
* @return all registries
*/
public Collection<Registry> getRegistries() {
return Collections.unmodifiableCollection(new LinkedList<>(registries.values()));
}
public Registry getRegistry(String key) {
return registries.get(key);
}
public void putRegistry(String key, Registry registry) {
registries.put(key, registry);
}
public List<ServiceDiscovery> getServiceDiscoveries() {
return getRegistries().stream()
.filter(registry -> registry instanceof ServiceDiscoveryRegistry)
.map(registry -> (ServiceDiscoveryRegistry) registry)
.map(ServiceDiscoveryRegistry::getServiceDiscovery)
.collect(Collectors.toList());
}
/**
* Close all created registries
*/
public void destroyAll() {
if (!destroyed.compareAndSet(false, true)) {
return;
}
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Close all registries " + getRegistries());
}
// Lock up the registry shutdown process
lock.lock();
try {
for (Registry registry : getRegistries()) {
try {
registry.destroy();
} catch (Throwable e) {
LOGGER.warn(INTERNAL_ERROR, "unknown error in registry module", "", e.getMessage(), e);
}
}
registries.clear();
} finally {
// Release the lock
lock.unlock();
}
}
/**
* Reset state of AbstractRegistryFactory
*/
public void reset() {
destroyed.set(false);
registries.clear();
}
protected Registry getDefaultNopRegistryIfDestroyed() {
if (destroyed.get()) {
// 1-12 Failed to fetch (server) instance since the registry instances have been destroyed.
LOGGER.warn(
REGISTRY_FAILED_FETCH_INSTANCE,
"misuse of the methods",
"",
"All registry instances have been destroyed, failed to fetch any instance. "
+ "Usually, this means no need to try to do unnecessary redundant resource clearance, all registries has been taken care of.");
return DEFAULT_NOP_REGISTRY;
}
return null;
}
protected Lock getRegistryLock() {
return lock;
}
public void removeDestroyedRegistry(Registry toRm) {
lock.lock();
try {
registries.entrySet().removeIf(entry -> entry.getValue().equals(toRm));
} finally {
lock.unlock();
}
}
// for unit test
public void clearRegistryNotDestroy() {
registries.clear();
}
public static RegistryManager getInstance(ApplicationModel applicationModel) {
return applicationModel.getBeanFactory().getBean(RegistryManager.class);
}
private static final Registry DEFAULT_NOP_REGISTRY = new Registry() {
@Override
public URL getUrl() {
return null;
}
@Override
public boolean isAvailable() {
return false;
}
@Override
public void destroy() {}
@Override
public void register(URL url) {}
@Override
public void unregister(URL url) {}
@Override
public void subscribe(URL url, NotifyListener listener) {}
@Override
public void unsubscribe(URL url, NotifyListener listener) {}
@Override
public List<URL> lookup(URL url) {
return null;
}
};
}
| RegistryManager |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/InvokerBuilder.java | {
"start": 2142,
"end": 2752
} | class ____ {
* static String repeatTwice(String str) {
* return str + " " + str;
* }
* }
* </pre>
*
* Then, assuming we have obtained the {@code InvokerBuilder} for {@code MyService.hello()},
* we can set up the lookup and transformations and build an invoker like so:
*
* <pre>
* builder.withInstanceLookup()
* .withArgumentTransformer(0, String.class, "toUpperCase")
* .withReturnValueTransformer(Transformations.class, "repeatTwice")
* .build();
* </pre>
*
* The resulting invoker will be equivalent to the following class:
*
* <pre>
* | Transformations |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/event/RefreshRoutesResultEvent.java | {
"start": 805,
"end": 1231
} | class ____ extends ApplicationEvent {
private @Nullable Throwable throwable;
public RefreshRoutesResultEvent(Object source, Throwable throwable) {
super(source);
this.throwable = throwable;
}
public RefreshRoutesResultEvent(Object source) {
super(source);
}
public @Nullable Throwable getThrowable() {
return throwable;
}
public boolean isSuccess() {
return throwable == null;
}
}
| RefreshRoutesResultEvent |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4344ManagedPluginExecutionOrderTest.java | {
"start": 1150,
"end": 2084
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that custom executions from managed plugins which are part of the default lifecycle bindings get
* executed after executions from plugins that are defined in the regular build section and bound to the
* same phase.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4344");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("process-resources");
verifier.execute();
verifier.verifyErrorFreeLog();
List<String> lines = verifier.loadLines("target/log.txt");
assertEquals(Arrays.asList(new String[] {"first", "second"}), lines);
}
}
| MavenITmng4344ManagedPluginExecutionOrderTest |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditPrecommitPlugin.java | {
"start": 913,
"end": 2177
} | class ____ extends PrecommitPlugin {
public static final String TASK_NAME = "splitPackagesAudit";
@Override
public TaskProvider<? extends Task> createTask(Project project) {
TaskProvider<SplitPackagesAuditTask> task = project.getTasks().register(TASK_NAME, SplitPackagesAuditTask.class);
task.configure(t -> {
t.setProjectBuildDirs(getProjectBuildDirs(project));
t.setClasspath(project.getConfigurations().getByName(JavaPlugin.COMPILE_CLASSPATH_CONFIGURATION_NAME));
SourceSet mainSourceSet = GradleUtils.getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME);
t.dependsOn(mainSourceSet.getJava().getSourceDirectories());
t.getSrcDirs().set(project.provider(() -> mainSourceSet.getAllSource().getSrcDirs()));
});
return task;
}
private static Map<File, String> getProjectBuildDirs(Project project) {
// while this is done in every project, it should be cheap to calculate
Map<File, String> buildDirs = new HashMap<>();
for (Project p : project.getRootProject().getAllprojects()) {
buildDirs.put(p.getBuildDir(), p.getPath());
}
return buildDirs;
}
}
| SplitPackagesAuditPrecommitPlugin |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalSupplierTests.java | {
"start": 1291,
"end": 6054
} | class ____ extends AbstractWireTestCase<LocalSupplier> {
private static final NavigableSet<TransportVersion> DEFAULT_BWC_VERSIONS = getAllBWCVersions();
private static final TransportVersion ESQL_LOCAL_RELATION_WITH_NEW_BLOCKS = TransportVersion.fromName(
"esql_local_relation_with_new_blocks"
);
private static final BlockFactory BLOCK_FACTORY = BlockFactory.getInstance(
new NoopCircuitBreaker("noop-esql-breaker"),
BigArrays.NON_RECYCLING_INSTANCE
);
private static NavigableSet<TransportVersion> getAllBWCVersions() {
return TransportVersionUtils.allReleasedVersions().tailSet(TransportVersion.minimumCompatible(), true);
}
public final void testBwcSerialization() throws IOException {
for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
LocalSupplier testInstance = createTestInstance();
for (TransportVersion bwcVersion : DEFAULT_BWC_VERSIONS) {
assertBwcSerialization(testInstance, bwcVersion);
}
}
}
protected final void assertBwcSerialization(LocalSupplier testInstance, TransportVersion version) throws IOException {
LocalSupplier deserializedInstance = copyInstance(testInstance, version);
assertOnBWCObject(testInstance, deserializedInstance, version);
}
protected abstract void assertOnBWCObject(LocalSupplier testInstance, LocalSupplier bwcDeserializedObject, TransportVersion version);
@Override
protected LocalSupplier copyInstance(LocalSupplier instance, TransportVersion version) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
output.setTransportVersion(version);
writeTo(output, instance, version);
try (StreamInput in = output.bytes().streamInput()) {
in.setTransportVersion(version);
return readFrom(in, version);
}
}
}
protected void writeTo(BytesStreamOutput output, LocalSupplier instance, TransportVersion version) throws IOException {
if (version.supports(ESQL_LOCAL_RELATION_WITH_NEW_BLOCKS)) {
new PlanStreamOutput(output, null).writeNamedWriteable(instance);
} else {
instance.writeTo(new PlanStreamOutput(output, null));
}
}
protected LocalSupplier readFrom(StreamInput input, TransportVersion version) throws IOException {
if (version.supports(ESQL_LOCAL_RELATION_WITH_NEW_BLOCKS)) {
return new PlanStreamInput(input, getNamedWriteableRegistry(), null).readNamedWriteable(LocalSupplier.class);
} else {
return LocalSourceExec.readLegacyLocalSupplierFrom(new PlanStreamInput(input, getNamedWriteableRegistry(), null));
}
}
@Override
protected LocalSupplier createTestInstance() {
return randomLocalSupplier();
}
public static LocalSupplier randomLocalSupplier() {
return randomBoolean() ? EmptyLocalSupplier.EMPTY : randomNonEmpty();
}
public static LocalSupplier randomNonEmpty() {
int blockSize = randomInt(1000);
Block[] blocks = randomList(1, 10, () -> LocalSupplierTests.randomBlock(blockSize)).toArray(Block[]::new);
return randomBoolean() ? LocalSupplier.of(new Page(blocks)) : new CopyingLocalSupplier(new Page(blocks));
}
@Override
protected LocalSupplier mutateInstance(LocalSupplier instance) throws IOException {
Page page = instance.get();
Block[] blocks = new Block[page.getBlockCount()];
for (int i = 0; i < page.getBlockCount(); i++) {
blocks[i] = page.getBlock(i);
}
if (blocks.length > 0 && randomBoolean()) {
if (randomBoolean()) {
return EmptyLocalSupplier.EMPTY;
}
return LocalSupplier.of(new Page(page.getPositionCount(), Arrays.copyOf(blocks, blocks.length - 1, Block[].class)));
}
blocks = Arrays.copyOf(blocks, blocks.length + 1, Block[].class);
blocks[blocks.length - 1] = randomBlock(page.getPositionCount());
return LocalSupplier.of(new Page(blocks));
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(PlanWritables.others());
}
static Block randomBlock(int blockSize) {
try (IntBlock.Builder ints = BLOCK_FACTORY.newIntBlockBuilder(blockSize)) {
for (int i = 0; i < blockSize; i++) {
ints.appendInt(randomInt());
}
return ints.build();
}
}
@Override
protected boolean shouldBeSame(LocalSupplier newInstance) {
return newInstance.get().getBlockCount() == 0;
}
}
| LocalSupplierTests |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/dfs/DfsKnnResults.java | {
"start": 859,
"end": 1827
} | class ____ implements Writeable {
private final String nestedPath;
private final ScoreDoc[] scoreDocs;
public DfsKnnResults(String nestedPath, ScoreDoc[] scoreDocs) {
this.nestedPath = nestedPath;
this.scoreDocs = scoreDocs;
}
public DfsKnnResults(StreamInput in) throws IOException {
scoreDocs = in.readArray(Lucene::readScoreDoc, ScoreDoc[]::new);
if (in.getTransportVersion().onOrAfter(V_8_11_X)) {
nestedPath = in.readOptionalString();
} else {
nestedPath = null;
}
}
public String getNestedPath() {
return nestedPath;
}
public ScoreDoc[] scoreDocs() {
return scoreDocs;
}
public void writeTo(StreamOutput out) throws IOException {
out.writeArray(Lucene::writeScoreDoc, scoreDocs);
if (out.getTransportVersion().onOrAfter(V_8_11_X)) {
out.writeOptionalString(nestedPath);
}
}
}
| DfsKnnResults |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/file/NewFileConsumerTest.java | {
"start": 2164,
"end": 2721
} | class ____ extends FileEndpoint {
private volatile boolean post;
@Override
protected FileConsumer newFileConsumer(Processor processor, GenericFileOperations<File> operations) {
return new FileConsumer(this, processor, operations, createGenericFileStrategy()) {
@Override
protected void postPollCheck(int polledMessages) {
post = true;
}
};
}
public boolean isPost() {
return post;
}
}
}
| MyFileEndpoint |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/observers/SafeObserver.java | {
"start": 1192,
"end": 6957
} | class ____<T> implements Observer<T>, Disposable {
/** The actual Subscriber. */
final Observer<? super T> downstream;
/** The subscription. */
Disposable upstream;
/** Indicates a terminal state. */
boolean done;
/**
* Constructs a {@code SafeObserver} by wrapping the given actual {@link Observer}.
* @param downstream the actual {@code Observer} to wrap, not {@code null} (not validated)
*/
public SafeObserver(@NonNull Observer<? super T> downstream) {
this.downstream = downstream;
}
@Override
public void onSubscribe(@NonNull Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
try {
downstream.onSubscribe(this);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
done = true;
// can't call onError because the actual's state may be corrupt at this point
try {
d.dispose();
} catch (Throwable e1) {
Exceptions.throwIfFatal(e1);
RxJavaPlugins.onError(new CompositeException(e, e1));
return;
}
RxJavaPlugins.onError(e);
}
}
}
@Override
public void dispose() {
upstream.dispose();
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
@Override
public void onNext(@NonNull T t) {
if (done) {
return;
}
if (upstream == null) {
onNextNoSubscription();
return;
}
if (t == null) {
Throwable ex = ExceptionHelper.createNullPointerException("onNext called with a null value.");
try {
upstream.dispose();
} catch (Throwable e1) {
Exceptions.throwIfFatal(e1);
onError(new CompositeException(ex, e1));
return;
}
onError(ex);
return;
}
try {
downstream.onNext(t);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
try {
upstream.dispose();
} catch (Throwable e1) {
Exceptions.throwIfFatal(e1);
onError(new CompositeException(e, e1));
return;
}
onError(e);
}
}
void onNextNoSubscription() {
done = true;
Throwable ex = new NullPointerException("Subscription not set!");
try {
downstream.onSubscribe(EmptyDisposable.INSTANCE);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
// can't call onError because the actual's state may be corrupt at this point
RxJavaPlugins.onError(new CompositeException(ex, e));
return;
}
try {
downstream.onError(ex);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
// if onError failed, all that's left is to report the error to plugins
RxJavaPlugins.onError(new CompositeException(ex, e));
}
}
@Override
public void onError(@NonNull Throwable t) {
if (done) {
RxJavaPlugins.onError(t);
return;
}
done = true;
if (upstream == null) {
Throwable npe = new NullPointerException("Subscription not set!");
try {
downstream.onSubscribe(EmptyDisposable.INSTANCE);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
// can't call onError because the actual's state may be corrupt at this point
RxJavaPlugins.onError(new CompositeException(t, npe, e));
return;
}
try {
downstream.onError(new CompositeException(t, npe));
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
// if onError failed, all that's left is to report the error to plugins
RxJavaPlugins.onError(new CompositeException(t, npe, e));
}
return;
}
if (t == null) {
t = ExceptionHelper.createNullPointerException("onError called with a null Throwable.");
}
try {
downstream.onError(t);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
RxJavaPlugins.onError(new CompositeException(t, ex));
}
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
if (upstream == null) {
onCompleteNoSubscription();
return;
}
try {
downstream.onComplete();
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
RxJavaPlugins.onError(e);
}
}
void onCompleteNoSubscription() {
Throwable ex = new NullPointerException("Subscription not set!");
try {
downstream.onSubscribe(EmptyDisposable.INSTANCE);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
// can't call onError because the actual's state may be corrupt at this point
RxJavaPlugins.onError(new CompositeException(ex, e));
return;
}
try {
downstream.onError(ex);
} catch (Throwable e) {
Exceptions.throwIfFatal(e);
// if onError failed, all that's left is to report the error to plugins
RxJavaPlugins.onError(new CompositeException(ex, e));
}
}
}
| SafeObserver |
java | google__auto | value/src/main/java/com/google/auto/value/extension/AutoValueExtension.java | {
"start": 11856,
"end": 13269
} | class ____ {
* abstract Builder setBar(String x);
* abstract Foo build();
* }
* }
* }</pre>
*
* <p>Here {@code builderMethods()} will return a set containing the method {@code
* Foo.builder()}. Generated code should usually call this method in preference to constructing
* {@code AutoValue_Foo.Builder()} directly, because this method can establish default values
* for properties, as it does here.
*/
Set<ExecutableElement> builderMethods();
/**
* Returns the method {@code build()} in the builder class, if it exists and returns the
* {@code @AutoValue} type. This is the method that generated code for {@code @AutoValue class
* Foo} should call in order to get an instance of {@code Foo} from its builder. The returned
* method is called {@code build()}; if the builder uses some other name then extensions have no
* good way to guess how they should build.
*
* <p>A common convention is for {@code build()} to be a concrete method in the
* {@code @AutoValue.Builder} class, which calls an abstract method {@code autoBuild()} that is
* implemented in the generated subclass. The {@code build()} method can then do validation,
* defaulting, and so on.
*/
Optional<ExecutableElement> buildMethod();
/**
* Returns the abstract build method. If the {@code @AutoValue} | Builder |
java | google__auto | common/src/test/java/com/google/auto/common/BasicAnnotationProcessorTest.java | {
"start": 2833,
"end": 2954
} | interface ____ {}
@Retention(RetentionPolicy.SOURCE)
@Target(ElementType.TYPE_PARAMETER)
public @ | RequiresGeneratedCode |
java | alibaba__nacos | common/src/test/java/com/alibaba/nacos/common/notify/DefaultPublisherTest.java | {
"start": 6144,
"end": 6268
} | class ____ extends Event {
private static final long serialVersionUID = -4081244883427311461L;
}
} | MockEvent |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java | {
"start": 1933,
"end": 5005
} | class ____ extends BaseTasksRequest<Request> {
private final String id;
private final boolean waitForCompletion;
private final boolean force;
private final boolean allowNoMatch;
private final boolean waitForCheckpoint;
private Set<String> expandedIds;
public Request(
String id,
boolean waitForCompletion,
boolean force,
@Nullable TimeValue timeout,
boolean allowNoMatch,
boolean waitForCheckpoint
) {
this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName());
this.waitForCompletion = waitForCompletion;
this.force = force;
// use the timeout value already present in BaseTasksRequest
this.setTimeout(timeout == null ? DEFAULT_TIMEOUT : timeout);
this.allowNoMatch = allowNoMatch;
this.waitForCheckpoint = waitForCheckpoint;
}
public Request(StreamInput in) throws IOException {
super(in);
id = in.readString();
waitForCompletion = in.readBoolean();
force = in.readBoolean();
if (in.readBoolean()) {
expandedIds = new HashSet<>(Arrays.asList(in.readStringArray()));
}
this.allowNoMatch = in.readBoolean();
this.waitForCheckpoint = in.readBoolean();
}
public String getId() {
return id;
}
public boolean waitForCompletion() {
return waitForCompletion;
}
public boolean isForce() {
return force;
}
public Set<String> getExpandedIds() {
return expandedIds;
}
public void setExpandedIds(Set<String> expandedIds) {
this.expandedIds = expandedIds;
}
public boolean isAllowNoMatch() {
return allowNoMatch;
}
public boolean isWaitForCheckpoint() {
return waitForCheckpoint;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(id);
out.writeBoolean(waitForCompletion);
out.writeBoolean(force);
boolean hasExpandedIds = expandedIds != null;
out.writeBoolean(hasExpandedIds);
if (hasExpandedIds) {
out.writeStringCollection(expandedIds);
}
out.writeBoolean(allowNoMatch);
out.writeBoolean(waitForCheckpoint);
}
@Override
public ActionRequestValidationException validate() {
if (force && waitForCheckpoint) {
return addValidationError(
format("cannot set both [%s] and [%s] to true", TransformField.FORCE, TransformField.WAIT_FOR_CHECKPOINT),
null
);
}
return null;
}
@Override
public int hashCode() {
// the base | Request |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java | {
"start": 951,
"end": 1532
} | class ____ extends Step {
@Nullable
private final Client client;
public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) {
super(key, nextStepKey);
this.client = client;
}
// For testing only
@Nullable
Client getClient() {
return client;
}
protected Client getClient(ProjectId projectId) {
return client.projectClient(projectId);
}
public abstract void evaluateCondition(ProjectState state, IndexMetadata indexMetadata, Listener listener, TimeValue masterTimeout);
public | AsyncWaitStep |
java | alibaba__nacos | common/src/main/java/com/alibaba/nacos/common/packagescan/util/PathMatcher.java | {
"start": 1294,
"end": 5889
} | interface ____ {
/**
* Does the given {@code path} represent a pattern that can be matched
* by an implementation of this interface?
*
* <p>If the return value is {@code false}, then the {@link #match}
* method does not have to be used because direct equality comparisons
* on the static path Strings will lead to the same result.
*
* @param path the path to check
* @return {@code true} if the given {@code path} represents a pattern
*/
boolean isPattern(String path);
/**
* Match the given {@code path} against the given {@code pattern},
* according to this PathMatcher's matching strategy.
*
* @param pattern the pattern to match against
* @param path the path to test
* @return {@code true} if the supplied {@code path} matched,
* {@code false} if it didn't
*/
boolean match(String pattern, String path);
/**
* Match the given {@code path} against the corresponding part of the given
* {@code pattern}, according to this PathMatcher's matching strategy.
*
* <p>Determines whether the pattern at least matches as far as the given base
* path goes, assuming that a full path may then match as well.
*
* @param pattern the pattern to match against
* @param path the path to test
* @return {@code true} if the supplied {@code path} matched,
* {@code false} if it didn't
*/
boolean matchStart(String pattern, String path);
/**
* Given a pattern and a full path, determine the pattern-mapped part.
*
* <p>This method is supposed to find out which part of the path is matched
* dynamically through an actual pattern, that is, it strips off a statically
* defined leading path from the given full path, returning only the actually
* pattern-matched part of the path.
*
* <p>For example: For "myroot/*.html" as pattern and "myroot/myfile.html"
* as full path, this method should return "myfile.html". The detailed
* determination rules are specified to this PathMatcher's matching strategy.
*
* <p>A simple implementation may return the given full path as-is in case
* of an actual pattern, and the empty String in case of the pattern not
* containing any dynamic parts (i.e. the {@code pattern} parameter being
* a static path that wouldn't qualify as an actual {@link #isPattern pattern}).
* A sophisticated implementation will differentiate between the static parts
* and the dynamic parts of the given path pattern.
*
* @param pattern the path pattern
* @param path the full path to introspect
* @return the pattern-mapped part of the given {@code path}
* (never {@code null})
*/
String extractPathWithinPattern(String pattern, String path);
/**
* Given a pattern and a full path, extract the URI template variables. URI template
* variables are expressed through curly brackets ('{' and '}').
*
* <p>For example: For pattern "/hotels/{hotel}" and path "/hotels/1", this method will
* return a map containing "hotel" → "1".
*
* @param pattern the path pattern, possibly containing URI templates
* @param path the full path to extract template variables from
* @return a map, containing variable names as keys; variables values as values
*/
Map<String, String> extractUriTemplateVariables(String pattern, String path);
/**
* Given a full path, returns a {@link Comparator} suitable for sorting patterns
* in order of explicitness for that path.
*
* <p>The full algorithm used depends on the underlying implementation,
* but generally, the returned {@code Comparator} will
* {@linkplain java.util.List#sort(Comparator) sort}
* a list so that more specific patterns come before generic patterns.
*
* @param path the full path to use for comparison
* @return a comparator capable of sorting patterns in order of explicitness
*/
Comparator<String> getPatternComparator(String path);
/**
* Combines two patterns into a new pattern that is returned.
*
* <p>The full algorithm used for combining the two pattern depends on the underlying implementation.
*
* @param pattern1 the first pattern
* @param pattern2 the second pattern
* @return the combination of the two patterns
* @throws IllegalArgumentException when the two patterns cannot be combined
*/
String combine(String pattern1, String pattern2);
}
| PathMatcher |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/service/TaskExecutorTests.java | {
"start": 3184,
"end": 3595
} | class ____ implements TestExecutor<TestTask>, TestListener {
@Override
public void execute(List<TestTask> tasks) {
tasks.forEach(TestTask::run);
}
@Nullable
public TimeValue timeout() {
return null;
}
public Priority priority() {
return Priority.NORMAL;
}
public abstract void run();
}
| TestTask |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DoclingComponentBuilderFactory.java | {
"start": 24402,
"end": 31075
} | class ____
extends AbstractComponentBuilder<DoclingComponent>
implements DoclingComponentBuilder {
@Override
protected DoclingComponent buildConcreteComponent() {
return new DoclingComponent();
}
private org.apache.camel.component.docling.DoclingConfiguration getOrCreateConfiguration(DoclingComponent component) {
if (component.getConfiguration() == null) {
component.setConfiguration(new org.apache.camel.component.docling.DoclingConfiguration());
}
return component.getConfiguration();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "configuration": ((DoclingComponent) component).setConfiguration((org.apache.camel.component.docling.DoclingConfiguration) value); return true;
case "contentInBody": getOrCreateConfiguration((DoclingComponent) component).setContentInBody((boolean) value); return true;
case "doclingServeUrl": getOrCreateConfiguration((DoclingComponent) component).setDoclingServeUrl((java.lang.String) value); return true;
case "enableOCR": getOrCreateConfiguration((DoclingComponent) component).setEnableOCR((boolean) value); return true;
case "includeLayoutInfo": getOrCreateConfiguration((DoclingComponent) component).setIncludeLayoutInfo((boolean) value); return true;
case "lazyStartProducer": ((DoclingComponent) component).setLazyStartProducer((boolean) value); return true;
case "ocrLanguage": getOrCreateConfiguration((DoclingComponent) component).setOcrLanguage((java.lang.String) value); return true;
case "operation": getOrCreateConfiguration((DoclingComponent) component).setOperation((org.apache.camel.component.docling.DoclingOperations) value); return true;
case "outputFormat": getOrCreateConfiguration((DoclingComponent) component).setOutputFormat((java.lang.String) value); return true;
case "useDoclingServe": getOrCreateConfiguration((DoclingComponent) component).setUseDoclingServe((boolean) value); return true;
case "apiTimeout": getOrCreateConfiguration((DoclingComponent) component).setApiTimeout((long) value); return true;
case "asyncPollInterval": getOrCreateConfiguration((DoclingComponent) component).setAsyncPollInterval((long) value); return true;
case "asyncTimeout": getOrCreateConfiguration((DoclingComponent) component).setAsyncTimeout((long) value); return true;
case "autowiredEnabled": ((DoclingComponent) component).setAutowiredEnabled((boolean) value); return true;
case "connectionRequestTimeout": getOrCreateConfiguration((DoclingComponent) component).setConnectionRequestTimeout((int) value); return true;
case "connectionTimeout": getOrCreateConfiguration((DoclingComponent) component).setConnectionTimeout((int) value); return true;
case "connectionTimeToLive": getOrCreateConfiguration((DoclingComponent) component).setConnectionTimeToLive((long) value); return true;
case "convertEndpoint": getOrCreateConfiguration((DoclingComponent) component).setConvertEndpoint((java.lang.String) value); return true;
case "doclingCommand": getOrCreateConfiguration((DoclingComponent) component).setDoclingCommand((java.lang.String) value); return true;
case "evictIdleConnections": getOrCreateConfiguration((DoclingComponent) component).setEvictIdleConnections((boolean) value); return true;
case "maxConnectionsPerRoute": getOrCreateConfiguration((DoclingComponent) component).setMaxConnectionsPerRoute((int) value); return true;
case "maxIdleTime": getOrCreateConfiguration((DoclingComponent) component).setMaxIdleTime((long) value); return true;
case "maxTotalConnections": getOrCreateConfiguration((DoclingComponent) component).setMaxTotalConnections((int) value); return true;
case "processTimeout": getOrCreateConfiguration((DoclingComponent) component).setProcessTimeout((long) value); return true;
case "socketTimeout": getOrCreateConfiguration((DoclingComponent) component).setSocketTimeout((int) value); return true;
case "useAsyncMode": getOrCreateConfiguration((DoclingComponent) component).setUseAsyncMode((boolean) value); return true;
case "validateAfterInactivity": getOrCreateConfiguration((DoclingComponent) component).setValidateAfterInactivity((int) value); return true;
case "workingDirectory": getOrCreateConfiguration((DoclingComponent) component).setWorkingDirectory((java.lang.String) value); return true;
case "batchFailOnFirstError": getOrCreateConfiguration((DoclingComponent) component).setBatchFailOnFirstError((boolean) value); return true;
case "batchParallelism": getOrCreateConfiguration((DoclingComponent) component).setBatchParallelism((int) value); return true;
case "batchSize": getOrCreateConfiguration((DoclingComponent) component).setBatchSize((int) value); return true;
case "batchTimeout": getOrCreateConfiguration((DoclingComponent) component).setBatchTimeout((long) value); return true;
case "splitBatchResults": getOrCreateConfiguration((DoclingComponent) component).setSplitBatchResults((boolean) value); return true;
case "extractAllMetadata": getOrCreateConfiguration((DoclingComponent) component).setExtractAllMetadata((boolean) value); return true;
case "includeMetadataInHeaders": getOrCreateConfiguration((DoclingComponent) component).setIncludeMetadataInHeaders((boolean) value); return true;
case "includeRawMetadata": getOrCreateConfiguration((DoclingComponent) component).setIncludeRawMetadata((boolean) value); return true;
case "apiKeyHeader": getOrCreateConfiguration((DoclingComponent) component).setApiKeyHeader((java.lang.String) value); return true;
case "authenticationScheme": getOrCreateConfiguration((DoclingComponent) component).setAuthenticationScheme((org.apache.camel.component.docling.AuthenticationScheme) value); return true;
case "authenticationToken": getOrCreateConfiguration((DoclingComponent) component).setAuthenticationToken((java.lang.String) value); return true;
case "maxFileSize": getOrCreateConfiguration((DoclingComponent) component).setMaxFileSize((long) value); return true;
default: return false;
}
}
}
} | DoclingComponentBuilderImpl |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DialectFeatureChecks.java | {
"start": 32485,
"end": 32652
} | class ____ implements DialectFeatureCheck {
public boolean apply(Dialect dialect) {
return definesFunction( dialect, "xmlagg" );
}
}
public static | SupportsXmlagg |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_2121/Issue2121Test.java | {
"start": 483,
"end": 1011
} | class ____ {
@ProcessorTest
public void shouldCompile() {
Issue2121Mapper mapper = Issue2121Mapper.INSTANCE;
Issue2121Mapper.Target target = mapper.map( new Issue2121Mapper.Source( Issue2121Mapper.SourceEnum.VALUE1 ) );
assertThat( target ).isNotNull();
assertThat( target.getValue() ).isEqualTo( "VALUE1" );
target = mapper.map( new Issue2121Mapper.Source( null ) );
assertThat( target ).isNotNull();
assertThat( target.getValue() ).isNull();
}
}
| Issue2121Test |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java | {
"start": 1886,
"end": 3784
} | class ____ {
private final String name;
private final String value;
private final boolean isSensitive;
private final ConfigSource source;
private final boolean readOnly;
private final Collection<ConfigSynonym> synonyms;
private final ConfigType type;
private final String documentation;
public ConfigEntry(String name, String value, ConfigSource source, boolean isSensitive, boolean readOnly,
Collection<ConfigSynonym> synonyms) {
this(name, value, source, isSensitive, readOnly, synonyms, ConfigType.UNKNOWN, null);
}
public ConfigEntry(String name, String value, ConfigSource source, boolean isSensitive, boolean readOnly,
Collection<ConfigSynonym> synonyms, ConfigType type, String documentation) {
this.name = Objects.requireNonNull(name, "name");
this.value = value;
this.source = Objects.requireNonNull(source, "source");
this.isSensitive = isSensitive;
this.readOnly = readOnly;
this.synonyms = Objects.requireNonNull(synonyms, "synonyms");
this.type = type;
this.documentation = documentation;
}
public String name() {
return name;
}
public String value() {
return value;
}
public boolean isSensitive() {
return isSensitive;
}
public ConfigSource source() {
return source;
}
public boolean isReadOnly() {
return readOnly;
}
public Collection<ConfigSynonym> synonyms() {
return synonyms;
}
public ConfigType type() {
return type;
}
public String documentation() {
return documentation;
}
}
public | ConfigEntry |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java | {
"start": 1982,
"end": 2085
} | class ____ {
Logger LOG = LoggerFactory.getLogger(TestValueQueue.class);
private static | TestValueQueue |
java | alibaba__nacos | common/src/test/java/com/alibaba/nacos/common/ability/AbstractAbilityControlManagerTest.java | {
"start": 1413,
"end": 5545
} | class ____ {
private AbstractAbilityControlManager abilityControlManager;
private Subscriber<AbstractAbilityControlManager.AbilityUpdateEvent> mockSubscriber;
private boolean isOn = true;
private AssertionError assertionError;
private boolean notified = false;
@BeforeEach
void setUp() throws Exception {
mockSubscriber = new Subscriber<AbstractAbilityControlManager.AbilityUpdateEvent>() {
@Override
public void onEvent(AbstractAbilityControlManager.AbilityUpdateEvent event) {
notified = true;
try {
assertEquals(AbilityKey.SERVER_FUZZY_WATCH, event.getAbilityKey());
assertEquals(isOn, event.isOn());
assertEquals(2, event.getAbilityTable().size());
assertEquals(isOn, event.getAbilityTable().get(AbilityKey.SERVER_FUZZY_WATCH.getName()));
} catch (AssertionError error) {
assertionError = error;
}
}
@Override
public Class<? extends Event> subscribeType() {
return AbstractAbilityControlManager.AbilityUpdateEvent.class;
}
};
abilityControlManager = new MockAbilityControlManager();
NotifyCenter.registerSubscriber(mockSubscriber);
}
@AfterEach
void tearDown() throws Exception {
NotifyCenter.deregisterSubscriber(mockSubscriber);
assertionError = null;
notified = false;
}
@Test
void testEnableCurrentNodeAbility() throws InterruptedException {
isOn = true;
abilityControlManager.enableCurrentNodeAbility(AbilityKey.SERVER_FUZZY_WATCH);
TimeUnit.MILLISECONDS.sleep(1100);
assertTrue(notified);
if (null != assertionError) {
throw assertionError;
}
}
@Test
void testDisableCurrentNodeAbility() throws InterruptedException {
isOn = false;
abilityControlManager.disableCurrentNodeAbility(AbilityKey.SERVER_FUZZY_WATCH);
TimeUnit.MILLISECONDS.sleep(1100);
assertTrue(notified);
if (null != assertionError) {
throw assertionError;
}
}
@Test
void testIsCurrentNodeAbilityRunning() {
assertEquals(AbilityStatus.SUPPORTED, abilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_FUZZY_WATCH));
assertEquals(AbilityStatus.NOT_SUPPORTED, abilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_DISTRIBUTED_LOCK));
assertEquals(AbilityStatus.UNKNOWN, abilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SDK_CLIENT_FUZZY_WATCH));
}
@Test
void testGetCurrentNodeAbilities() {
Map<String, Boolean> actual = abilityControlManager.getCurrentNodeAbilities(AbilityMode.SERVER);
assertEquals(2, actual.size());
assertTrue(actual.containsKey(AbilityKey.SERVER_FUZZY_WATCH.getName()));
assertTrue(actual.containsKey(AbilityKey.SERVER_DISTRIBUTED_LOCK.getName()));
actual = abilityControlManager.getCurrentNodeAbilities(AbilityMode.SDK_CLIENT);
assertTrue(actual.isEmpty());
}
@Test
void testGetPriority() {
assertEquals(Integer.MIN_VALUE, abilityControlManager.getPriority());
}
@Test
void testInitFailed() {
assertThrows(IllegalStateException.class, () -> {
abilityControlManager = new AbstractAbilityControlManager() {
@Override
protected Map<AbilityMode, Map<AbilityKey, Boolean>> initCurrentNodeAbilities() {
Map<AbilityKey, Boolean> abilities = Collections.singletonMap(AbilityKey.SDK_CLIENT_FUZZY_WATCH, true);
return Collections.singletonMap(AbilityMode.SERVER, abilities);
}
@Override
public int getPriority() {
return 0;
}
};
});
}
private static final | AbstractAbilityControlManagerTest |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CacheLimitPolicy.java | {
"start": 935,
"end": 2105
} | interface ____ {
/** Whether to support directly write in cache. */
boolean directWriteInCache();
/**
* Whether the cache usage is safe to add.
*
* @param toAddSize
* @return false if the toAddSize is larger than max available capacity, true otherwise.
*/
boolean isSafeToAdd(long toAddSize);
/**
* Whether the cache usage is exceeded the upperbound.
*
* @param toAddSize the size about to add.
* @param hasFile whether the file is already in cache.
* @return true if the cache usage is overflow, false otherwise.
*/
boolean isOverflow(long toAddSize, boolean hasFile);
/**
* Acquire cache.
*
* @param toAddSize
*/
void acquire(long toAddSize);
/**
* Release cache.
*
* @param toReleaseSize
*/
void release(long toReleaseSize);
/**
* Get current used bytes.
*
* @return cache bytes.
*/
long usedBytes();
/**
* Register customized metrics.
*
* @param prefix
* @param metricGroup
*/
void registerCustomizedMetrics(String prefix, MetricGroup metricGroup);
}
| CacheLimitPolicy |
java | apache__flink | flink-kubernetes/src/main/java/org/apache/flink/kubernetes/highavailability/KubernetesLeaderElectionHaServices.java | {
"start": 2507,
"end": 8958
} | class ____ extends AbstractHaServices {
private static final Logger LOG =
LoggerFactory.getLogger(KubernetesLeaderElectionHaServices.class);
private final String clusterId;
private final FlinkKubeClient kubeClient;
private final KubernetesConfigMapSharedWatcher configMapSharedWatcher;
private final ExecutorService watchExecutorService;
private final String lockIdentity;
KubernetesLeaderElectionHaServices(
FlinkKubeClient kubeClient,
Executor ioExecutor,
Configuration configuration,
BlobStoreService blobStoreService)
throws Exception {
this(
kubeClient,
kubeClient.createConfigMapSharedWatcher(
getClusterConfigMap(configuration.get(KubernetesConfigOptions.CLUSTER_ID))),
Executors.newCachedThreadPool(
new ExecutorThreadFactory("config-map-watch-handler")),
ioExecutor,
configuration.get(KubernetesConfigOptions.CLUSTER_ID),
UUID.randomUUID().toString(),
configuration,
blobStoreService);
}
private KubernetesLeaderElectionHaServices(
FlinkKubeClient kubeClient,
KubernetesConfigMapSharedWatcher configMapSharedWatcher,
ExecutorService watchExecutorService,
Executor ioExecutor,
String clusterId,
String lockIdentity,
Configuration configuration,
BlobStoreService blobStoreService)
throws Exception {
super(
configuration,
createDriverFactory(
kubeClient,
configMapSharedWatcher,
watchExecutorService,
clusterId,
lockIdentity,
configuration),
ioExecutor,
blobStoreService,
FileSystemJobResultStore.fromConfiguration(configuration, ioExecutor));
this.kubeClient = checkNotNull(kubeClient);
this.clusterId = checkNotNull(clusterId);
this.configMapSharedWatcher = checkNotNull(configMapSharedWatcher);
this.watchExecutorService = checkNotNull(watchExecutorService);
this.lockIdentity = checkNotNull(lockIdentity);
}
private static LeaderElectionDriverFactory createDriverFactory(
FlinkKubeClient kubeClient,
KubernetesConfigMapSharedWatcher configMapSharedWatcher,
Executor watchExecutorService,
String clusterId,
String lockIdentity,
Configuration configuration) {
final KubernetesLeaderElectionConfiguration leaderElectionConfiguration =
new KubernetesLeaderElectionConfiguration(
getClusterConfigMap(clusterId), lockIdentity, configuration);
return new KubernetesLeaderElectionDriverFactory(
kubeClient,
leaderElectionConfiguration,
configMapSharedWatcher,
watchExecutorService);
}
@Override
protected LeaderRetrievalService createLeaderRetrievalService(String componentId) {
return new DefaultLeaderRetrievalService(
new KubernetesLeaderRetrievalDriverFactory(
configMapSharedWatcher,
watchExecutorService,
getClusterConfigMap(),
componentId));
}
@Override
protected CheckpointRecoveryFactory createCheckpointRecoveryFactory() {
return KubernetesCheckpointRecoveryFactory.withoutLeadershipValidation(
kubeClient, configuration, ioExecutor, clusterId, this::getJobSpecificConfigMap);
}
private String getJobSpecificConfigMap(JobID jobID) {
return clusterId + NAME_SEPARATOR + jobID.toString() + NAME_SEPARATOR + "config-map";
}
@Override
protected ExecutionPlanStore createExecutionPlanStore() throws Exception {
return KubernetesUtils.createExecutionPlanStore(
configuration, kubeClient, getClusterConfigMap(), lockIdentity);
}
private String getClusterConfigMap() {
return getClusterConfigMap(clusterId);
}
private static String getClusterConfigMap(String clusterId) {
return clusterId + NAME_SEPARATOR + "cluster-config-map";
}
@Override
public void internalClose() throws Exception {
Exception exception = null;
try {
closeK8sServices();
} catch (Exception e) {
exception = e;
}
kubeClient.close();
ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.watchExecutorService);
ExceptionUtils.tryRethrowException(exception);
}
private void closeK8sServices() {
configMapSharedWatcher.close();
final int outstandingTaskCount = watchExecutorService.shutdownNow().size();
if (outstandingTaskCount != 0) {
LOG.debug(
"The k8s HA services were closed with {} event(s) still not being processed. No further action necessary.",
outstandingTaskCount);
}
}
@Override
public void internalCleanup() throws Exception {
Exception exception = null;
// in order to clean up, we first need to stop the services that rely on the config maps
try {
closeK8sServices();
} catch (Exception e) {
exception = e;
}
kubeClient.deleteConfigMap(getClusterConfigMap()).get();
ExceptionUtils.tryRethrowException(exception);
}
@Override
public void internalCleanupJobData(JobID jobID) throws Exception {
kubeClient.deleteConfigMap(getJobSpecificConfigMap(jobID)).get();
// need to delete job specific leader address from leader config map
}
@Override
protected String getLeaderPathForResourceManager() {
return "resourcemanager";
}
@Override
protected String getLeaderPathForDispatcher() {
return "dispatcher";
}
@Override
protected String getLeaderPathForJobManager(JobID jobID) {
return "job-" + jobID.toString();
}
@Override
protected String getLeaderPathForRestServer() {
return "restserver";
}
}
| KubernetesLeaderElectionHaServices |
java | grpc__grpc-java | core/src/testFixtures/java/io/grpc/internal/FakeClock.java | {
"start": 11820,
"end": 11981
} | interface ____ {
/**
* Inspect the Runnable and returns true if it should be accepted.
*/
boolean shouldAccept(Runnable runnable);
}
}
| TaskFilter |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java | {
"start": 2083,
"end": 8529
} | class ____ extends BlockInfo {
private final ErasureCodingPolicy ecPolicy;
/**
* Always the same size with triplets. Record the block index for each triplet
* TODO: actually this is only necessary for over-replicated block. Thus can
* be further optimized to save memory usage.
*/
private byte[] indices;
public BlockInfoStriped(Block blk, ErasureCodingPolicy ecPolicy) {
super(blk, (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()));
indices = new byte[ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()];
initIndices();
this.ecPolicy = ecPolicy;
}
public short getTotalBlockNum() {
return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
public short getDataBlockNum() {
return (short) ecPolicy.getNumDataUnits();
}
public short getParityBlockNum() {
return (short) ecPolicy.getNumParityUnits();
}
public int getCellSize() {
return ecPolicy.getCellSize();
}
/**
* If the block is committed/completed and its length is less than a full
* stripe, it returns the the number of actual data blocks.
* Otherwise it returns the number of data units specified by erasure coding policy.
*/
public short getRealDataBlockNum() {
if (isComplete() || getBlockUCState() == BlockUCState.COMMITTED) {
return (short) Math.min(getDataBlockNum(),
(getNumBytes() - 1) / ecPolicy.getCellSize() + 1);
} else {
return getDataBlockNum();
}
}
public short getRealTotalBlockNum() {
return (short) (getRealDataBlockNum() + getParityBlockNum());
}
public ErasureCodingPolicy getErasureCodingPolicy() {
return ecPolicy;
}
private void initIndices() {
for (int i = 0; i < indices.length; i++) {
indices[i] = -1;
}
}
private int findSlot() {
int i = getTotalBlockNum();
int capacity = getCapacity();
for (; i < capacity; i++) {
if (getStorageInfo(i) == null) {
return i;
}
}
// need to expand the triplet size
ensureCapacity(i + 1, true);
return i;
}
@Override
boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
Preconditions.checkArgument(BlockIdManager.isStripedBlockID(
reportedBlock.getBlockId()), "reportedBlock is not striped");
Preconditions.checkArgument(BlockIdManager.convertToStripedID(
reportedBlock.getBlockId()) == this.getBlockId(),
"reported blk_%s does not belong to the group of stored blk_%s",
reportedBlock.getBlockId(), this.getBlockId());
int blockIndex = BlockIdManager.getBlockIndex(reportedBlock);
int index = blockIndex;
DatanodeStorageInfo old = getStorageInfo(index);
if (old != null && !old.equals(storage)) { // over replicated
// check if the storage has been stored
int i = findStorageInfo(storage);
if (i == -1) {
index = findSlot();
} else {
return true;
}
}
addStorage(storage, index, blockIndex);
return true;
}
private void addStorage(DatanodeStorageInfo storage, int index,
int blockIndex) {
setStorageInfo(index, storage);
setNext(index, null);
setPrevious(index, null);
indices[index] = (byte) blockIndex;
}
private int findStorageInfoFromEnd(DatanodeStorageInfo storage) {
final int len = getCapacity();
for(int idx = len - 1; idx >= 0; idx--) {
DatanodeStorageInfo cur = getStorageInfo(idx);
if (storage.equals(cur)) {
return idx;
}
}
return -1;
}
@VisibleForTesting
public byte getStorageBlockIndex(DatanodeStorageInfo storage) {
int i = this.findStorageInfo(storage);
return i == -1 ? -1 : indices[i];
}
/**
* Identify the block stored in the given datanode storage. Note that
* the returned block has the same block Id with the one seen/reported by the
* DataNode.
*/
Block getBlockOnStorage(DatanodeStorageInfo storage) {
int index = getStorageBlockIndex(storage);
if (index < 0) {
return null;
} else {
Block block = new Block(this);
block.setBlockId(this.getBlockId() + index);
return block;
}
}
@Override
boolean removeStorage(DatanodeStorageInfo storage) {
int dnIndex = findStorageInfoFromEnd(storage);
if (dnIndex < 0) { // the node is not found
return false;
}
assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
"Block is still in the list and must be removed first.";
// set the triplet to null
setStorageInfo(dnIndex, null);
setNext(dnIndex, null);
setPrevious(dnIndex, null);
indices[dnIndex] = -1;
return true;
}
private void ensureCapacity(int totalSize, boolean keepOld) {
if (getCapacity() < totalSize) {
Object[] old = triplets;
byte[] oldIndices = indices;
triplets = new Object[totalSize * 3];
indices = new byte[totalSize];
initIndices();
if (keepOld) {
System.arraycopy(old, 0, triplets, 0, old.length);
System.arraycopy(oldIndices, 0, indices, 0, oldIndices.length);
}
}
}
public long spaceConsumed() {
// In case striped blocks, total usage by this striped blocks should
// be the total of data blocks and parity blocks because
// `getNumBytes` is the total of actual data block size.
return StripedBlockUtil.spaceConsumedByStripedBlock(getNumBytes(),
ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits(),
ecPolicy.getCellSize());
}
@Override
public final boolean isStriped() {
return true;
}
@Override
public BlockType getBlockType() {
return BlockType.STRIPED;
}
@Override
public int numNodes() {
assert this.triplets != null : "BlockInfo is not initialized";
assert triplets.length % 3 == 0 : "Malformed BlockInfo";
int num = 0;
for (int idx = getCapacity()-1; idx >= 0; idx--) {
if (getStorageInfo(idx) != null) {
num++;
}
}
return num;
}
@Override
final boolean hasNoStorage() {
final int len = getCapacity();
for(int idx = 0; idx < len; idx++) {
if (getStorageInfo(idx) != null) {
return false;
}
}
return true;
}
/**
* Striped blocks on Provided Storage is not supported. All blocks on
* Provided storage are assumed to be "contiguous".
*/
@Override
boolean isProvided() {
return false;
}
/**
* This | BlockInfoStriped |
java | bumptech__glide | samples/flickr/src/main/java/com/bumptech/glide/samples/flickr/api/SearchQuery.java | {
"start": 124,
"end": 1654
} | class ____ implements Query {
public static final Creator<SearchQuery> CREATOR =
new Creator<SearchQuery>() {
@Override
public SearchQuery createFromParcel(Parcel source) {
return new SearchQuery(source);
}
@Override
public SearchQuery[] newArray(int size) {
return new SearchQuery[size];
}
};
private final String queryString;
private boolean requireSafeOverQuality;
public SearchQuery(String queryString) {
this.queryString = queryString;
}
/**
* Requires the search to be as safe as possible, evne if it substantially limits the results in a
* way that might otherwise be unexpected.
*/
public SearchQuery requireSafeOverQuality() {
requireSafeOverQuality = true;
return this;
}
private SearchQuery(Parcel in) {
queryString = in.readString();
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeString(queryString);
}
@Override
public int describeContents() {
return 0;
}
@Override
public String getDescription() {
return queryString;
}
@Override
public String getUrl() {
return Api.getSearchUrl(queryString, requireSafeOverQuality);
}
@Override
public boolean equals(Object o) {
if (o instanceof SearchQuery) {
SearchQuery other = (SearchQuery) o;
return queryString.equals(other.queryString);
}
return false;
}
@Override
public int hashCode() {
return queryString.hashCode();
}
}
| SearchQuery |
java | apache__maven | its/core-it-support/core-it-plugins/maven-it-plugin-all/src/main/java/org/apache/maven/plugin/coreit/AggregatorDependenciesMojo.java | {
"start": 1426,
"end": 2230
} | class ____ extends AbstractMojo {
/**
* The path to the touch file, relative to the project's base directory.
*/
@Parameter(property = "aggregator.touchFile", defaultValue = "${project.build.directory}/touch.txt")
private File touchFile;
public void execute() throws MojoExecutionException {
getLog().info("[MAVEN-CORE-IT-LOG] Touching file: " + touchFile);
if (touchFile != null) {
try {
touchFile.getParentFile().mkdirs();
touchFile.createNewFile();
} catch (IOException e) {
throw new MojoExecutionException("Failed to create touch file " + touchFile, e);
}
}
getLog().info("[MAVEN-CORE-IT-LOG] Touched file: " + touchFile);
}
}
| AggregatorDependenciesMojo |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/AnnotationUtilsTests.java | {
"start": 49369,
"end": 49743
} | class ____ extends Root {
@Order(25)
public void annotatedOnLeaf() {
}
@Meta1
public void metaAnnotatedOnLeaf() {
}
@MetaMeta
public void metaMetaAnnotatedOnLeaf() {
}
@Override
@Order(1)
public void overrideToAnnotate() {
}
@Override
public void overrideWithoutNewAnnotation() {
}
}
@Retention(RetentionPolicy.RUNTIME)
@Inherited
@ | Leaf |
java | apache__camel | components/camel-schematron/src/main/java/org/apache/camel/component/schematron/SchematronEndpoint.java | {
"start": 2311,
"end": 3473
} | class ____ extends DefaultEndpoint {
private static final Logger LOG = LoggerFactory.getLogger(SchematronEndpoint.class);
private TransformerFactory transformerFactory;
@UriPath
@Metadata(required = true, supportFileReference = true)
private String path;
@UriParam
private boolean abort;
@UriParam
private Templates rules;
@UriParam(label = "advanced")
private URIResolver uriResolver;
public SchematronEndpoint() {
}
public SchematronEndpoint(String uri, String path, SchematronComponent component) {
super(uri, component);
this.path = path;
}
@Override
public boolean isRemote() {
return false;
}
@Override
public Producer createProducer() throws Exception {
return new SchematronProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
throw new UnsupportedOperationException("Consumer is not implemented for this component");
}
public String getPath() {
return path;
}
/**
* The path to the schematron rules file. Can either be in | SchematronEndpoint |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLContainsExpr.java | {
"start": 840,
"end": 4626
} | class ____ extends SQLExprImpl implements SQLReplaceable, Serializable {
private static final long serialVersionUID = 1L;
private boolean not;
private SQLExpr expr;
private List<SQLExpr> targetList = new ArrayList<SQLExpr>();
public SQLContainsExpr() {
}
public SQLContainsExpr(SQLExpr expr) {
this.setExpr(expr);
}
public SQLContainsExpr(SQLExpr expr, boolean not) {
this.setExpr(expr);
this.not = not;
}
public SQLContainsExpr clone() {
SQLContainsExpr x = new SQLContainsExpr();
x.not = not;
if (expr != null) {
x.setExpr(expr.clone());
}
for (SQLExpr e : targetList) {
SQLExpr e2 = e.clone();
e2.setParent(x);
x.targetList.add(e2);
}
return x;
}
public boolean isNot() {
return this.not;
}
public void setNot(boolean not) {
this.not = not;
}
public SQLExpr getExpr() {
return this.expr;
}
public void setExpr(SQLExpr expr) {
if (expr != null) {
expr.setParent(this);
}
this.expr = expr;
}
public List<SQLExpr> getTargetList() {
return this.targetList;
}
public void setTargetList(List<SQLExpr> targetList) {
this.targetList = targetList;
}
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
if (this.expr != null) {
this.expr.accept(visitor);
}
if (this.targetList != null) {
for (SQLExpr item : this.targetList) {
if (item != null) {
item.accept(visitor);
}
}
}
}
visitor.endVisit(this);
}
public List<SQLObject> getChildren() {
List<SQLObject> children = new ArrayList<SQLObject>();
if (this.expr != null) {
children.add(this.expr);
}
children.addAll(this.targetList);
return children;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((expr == null) ? 0 : expr.hashCode());
result = prime * result + (not ? 1231 : 1237);
result = prime * result + ((targetList == null) ? 0 : targetList.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
SQLContainsExpr other = (SQLContainsExpr) obj;
if (expr == null) {
if (other.expr != null) {
return false;
}
} else if (!expr.equals(other.expr)) {
return false;
}
if (not != other.not) {
return false;
}
if (targetList == null) {
if (other.targetList != null) {
return false;
}
} else if (!targetList.equals(other.targetList)) {
return false;
}
return true;
}
public SQLDataType computeDataType() {
return SQLBooleanExpr.DATA_TYPE;
}
@Override
public boolean replace(SQLExpr expr, SQLExpr target) {
if (this.expr == expr) {
setExpr(target);
return true;
}
for (int i = 0; i < targetList.size(); i++) {
if (targetList.get(i) == expr) {
targetList.set(i, target);
target.setParent(this);
return true;
}
}
return false;
}
}
| SQLContainsExpr |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/reporting/legacy/xml/IncrementingClock.java | {
"start": 496,
"end": 1149
} | class ____ extends Clock {
private final Duration duration;
private final ZoneId zone;
private int counter;
IncrementingClock(int start, Duration duration) {
this(start, duration, ZoneId.systemDefault());
}
private IncrementingClock(int start, Duration duration, ZoneId zone) {
this.counter = start;
this.duration = duration;
this.zone = zone;
}
@Override
public Instant instant() {
return Instant.EPOCH.plus(duration.multipliedBy(counter++));
}
@Override
public Clock withZone(ZoneId zone) {
return new IncrementingClock(counter, duration, zone);
}
@Override
public ZoneId getZone() {
return zone;
}
}
| IncrementingClock |
java | quarkusio__quarkus | integration-tests/oidc-dev-services/src/main/java/io/quarkus/it/oidc/dev/services/ExpiredUpdatedSecurityIdentityWebSocket.java | {
"start": 301,
"end": 698
} | class ____ {
private final WebSocketSecurity webSocketSecurity;
ExpiredUpdatedSecurityIdentityWebSocket(WebSocketSecurity webSocketSecurity) {
this.webSocketSecurity = webSocketSecurity;
}
@OnTextMessage
String bye(String accessToken) {
webSocketSecurity.updateSecurityIdentity(accessToken);
return "bye";
}
}
| ExpiredUpdatedSecurityIdentityWebSocket |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/connector/catalog/functions/JavaLongAdd.java | {
"start": 2481,
"end": 3016
} | class ____ implements ScalarFunction<Long> {
private final boolean isResultNullable;
JavaLongAddBase(boolean isResultNullable) {
this.isResultNullable = isResultNullable;
}
@Override
public DataType[] inputTypes() {
return new DataType[] { DataTypes.LongType, DataTypes.LongType };
}
@Override
public DataType resultType() {
return DataTypes.LongType;
}
@Override
public boolean isResultNullable() {
return isResultNullable;
}
}
public static | JavaLongAddBase |
java | resilience4j__resilience4j | resilience4j-ratelimiter/src/main/java/io/github/resilience4j/ratelimiter/RateLimiterConfig.java | {
"start": 890,
"end": 5565
} | class ____ implements Serializable {
private static final long serialVersionUID = -1621614587284115957L;
private static final String TIMEOUT_DURATION_MUST_NOT_BE_NULL = "TimeoutDuration must not be null";
private static final String TIMEOUT_DURATION_TOO_LARGE = "TimeoutDuration too large";
private static final String LIMIT_REFRESH_PERIOD_MUST_NOT_BE_NULL = "LimitRefreshPeriod must not be null";
private static final String LIMIT_REFRESH_PERIOD_TOO_LARGE = "LimitRefreshPeriod too large";
private static final Duration ACCEPTABLE_REFRESH_PERIOD = Duration.ofNanos(1L);
private static final boolean DEFAULT_WRITABLE_STACK_TRACE_ENABLED = true;
private final Duration timeoutDuration;
private final Duration limitRefreshPeriod;
private final int limitForPeriod;
private final transient Predicate<Either<? extends Throwable, ?>> drainPermissionsOnResult;
private final boolean writableStackTraceEnabled;
private RateLimiterConfig(Duration timeoutDuration,
Duration limitRefreshPeriod,
int limitForPeriod,
Predicate<Either<? extends Throwable, ?>> drainPermissionsOnResult,
boolean writableStackTraceEnabled) {
this.timeoutDuration = timeoutDuration;
this.limitRefreshPeriod = limitRefreshPeriod;
this.limitForPeriod = limitForPeriod;
this.drainPermissionsOnResult = drainPermissionsOnResult;
this.writableStackTraceEnabled = writableStackTraceEnabled;
}
/**
* Returns a builder to create a custom RateLimiterConfig.
*
* @return a {@link RateLimiterConfig.Builder}
*/
public static Builder custom() {
return new Builder();
}
/**
* Returns a builder to create a custom RateLimiterConfig using specified config as prototype
*
* @param prototype A {@link RateLimiterConfig} prototype.
* @return a {@link RateLimiterConfig.Builder}
*/
public static Builder from(RateLimiterConfig prototype) {
return new Builder(prototype);
}
/**
* Creates a default RateLimiter configuration.
*
* @return a default RateLimiter configuration.
*/
public static RateLimiterConfig ofDefaults() {
return new Builder().build();
}
private static Duration checkTimeoutDuration(final Duration timeoutDuration) {
requireNonNull(timeoutDuration, TIMEOUT_DURATION_MUST_NOT_BE_NULL);
if (timeoutDuration.isNegative()) {
throw new IllegalArgumentException("TimeoutDuration must not be negative");
}
return validateDurationWithinRange(timeoutDuration, TIMEOUT_DURATION_TOO_LARGE);
}
private static Duration validateDurationWithinRange(Duration duration, String message) {
try {
return Duration.ofNanos(duration.toNanos()); // make sure there is no long overflow
} catch (Exception e) {
throw new RuntimeException(message, e);
}
}
private static Duration checkLimitRefreshPeriod(Duration limitRefreshPeriod) {
validateDurationWithinRange(
requireNonNull(limitRefreshPeriod, LIMIT_REFRESH_PERIOD_MUST_NOT_BE_NULL), LIMIT_REFRESH_PERIOD_TOO_LARGE);
boolean refreshPeriodIsTooShort =
limitRefreshPeriod.compareTo(ACCEPTABLE_REFRESH_PERIOD) < 0;
if (refreshPeriodIsTooShort) {
throw new IllegalArgumentException("LimitRefreshPeriod is too short");
}
return limitRefreshPeriod;
}
private static int checkLimitForPeriod(final int limitForPeriod) {
if (limitForPeriod < 1) {
throw new IllegalArgumentException("LimitForPeriod should be greater than 0");
}
return limitForPeriod;
}
public Duration getTimeoutDuration() {
return timeoutDuration;
}
public Duration getLimitRefreshPeriod() {
return limitRefreshPeriod;
}
public int getLimitForPeriod() {
return limitForPeriod;
}
public Predicate<Either<? extends Throwable, ?>> getDrainPermissionsOnResult() {
return drainPermissionsOnResult;
}
public boolean isWritableStackTraceEnabled() {
return writableStackTraceEnabled;
}
@Override
public String toString() {
return "RateLimiterConfig{" +
"timeoutDuration=" + timeoutDuration +
", limitRefreshPeriod=" + limitRefreshPeriod +
", limitForPeriod=" + limitForPeriod +
", writableStackTraceEnabled=" + writableStackTraceEnabled +
'}';
}
public static | RateLimiterConfig |
java | micronaut-projects__micronaut-core | test-suite/src/test/java/io/micronaut/docs/server/intro/HelloController.java | {
"start": 971,
"end": 1134
} | class ____ {
@Get(produces = MediaType.TEXT_PLAIN) // <2>
public String index() {
return "Hello World"; // <3>
}
}
// end::class[]
| HelloController |
java | quarkusio__quarkus | extensions/mongodb-client/runtime/src/main/java/io/quarkus/mongodb/reactive/ReactiveMongoDatabase.java | {
"start": 2933,
"end": 4236
} | class ____ use instead of {@code Document}.
* @return a {@link Uni} emitting the command result once completed
*/
<T> Uni<T> runCommand(Bson command, ReadPreference readPreference, Class<T> clazz);
/**
* Executes command in the context of the current database.
*
* @param clientSession the client session with which to associate this operation
* @param command the command to be run
* @return a {@link Uni} emitting the command result once completed
*/
Uni<Document> runCommand(ClientSession clientSession, Bson command);
/**
* Executes command in the context of the current database.
*
* @param clientSession the client session with which to associate this operation
* @param readPreference the {@link ReadPreference} to be used when executing the command
* @param command the command to be run
* @return a {@link Uni} emitting the command result once completed
*/
Uni<Document> runCommand(ClientSession clientSession, Bson command, ReadPreference readPreference);
/**
* Executes command in the context of the current database.
*
* @param clientSession the client session with which to associate this operation
* @param command the command to be run
* @param clazz the default | to |
java | apache__rocketmq | remoting/src/test/java/org/apache/rocketmq/remoting/protocol/body/ResetOffsetBodyTest.java | {
"start": 1117,
"end": 1855
} | class ____ {
@Test
public void testFromJson() throws Exception {
ResetOffsetBody rob = new ResetOffsetBody();
Map<MessageQueue, Long> offsetMap = new HashMap<>();
MessageQueue queue = new MessageQueue();
queue.setQueueId(1);
queue.setBrokerName("brokerName");
queue.setTopic("topic");
offsetMap.put(queue, 100L);
rob.setOffsetTable(offsetMap);
String json = RemotingSerializable.toJson(rob, true);
ResetOffsetBody fromJson = RemotingSerializable.fromJson(json, ResetOffsetBody.class);
assertThat(fromJson.getOffsetTable().get(queue)).isEqualTo(100L);
assertThat(fromJson.getOffsetTable().size()).isEqualTo(1);
}
}
| ResetOffsetBodyTest |
java | grpc__grpc-java | s2a/src/test/java/io/grpc/s2a/internal/handshaker/SslContextFactoryTest.java | {
"start": 1264,
"end": 6479
} | class ____ {
@Rule public final Expect expect = Expect.create();
private static final String FAKE_TARGET_NAME = "fake_target_name";
private S2AStub stub;
private FakeWriter writer;
@Before
public void setUp() {
writer = new FakeWriter();
stub = S2AStub.newInstanceForTesting(writer);
writer.setReader(stub.getReader());
}
@Test
public void createForClient_returnsValidSslContext() throws Exception {
SslContext sslContext =
SslContextFactory.createForClient(
stub, FAKE_TARGET_NAME, /* localIdentity= */ Optional.empty());
expect.that(sslContext).isNotNull();
expect.that(sslContext.sessionCacheSize()).isEqualTo(1);
expect.that(sslContext.sessionTimeout()).isEqualTo(300);
expect.that(sslContext.isClient()).isTrue();
expect.that(sslContext.applicationProtocolNegotiator().protocols()).containsExactly("h2");
SSLSessionContext sslSessionContext = sslContext.sessionContext();
if (sslSessionContext instanceof OpenSslSessionContext) {
OpenSslSessionContext openSslSessionContext = (OpenSslSessionContext) sslSessionContext;
expect.that(openSslSessionContext.isSessionCacheEnabled()).isFalse();
}
}
@Test
public void createForClient_withLocalIdentity_returnsValidSslContext() throws Exception {
SslContext sslContext =
SslContextFactory.createForClient(
stub, FAKE_TARGET_NAME, Optional.of(S2AIdentity.fromSpiffeId("fake-spiffe-id")));
expect.that(sslContext).isNotNull();
expect.that(sslContext.sessionCacheSize()).isEqualTo(1);
expect.that(sslContext.sessionTimeout()).isEqualTo(300);
expect.that(sslContext.isClient()).isTrue();
expect.that(sslContext.applicationProtocolNegotiator().protocols()).containsExactly("h2");
SSLSessionContext sslSessionContext = sslContext.sessionContext();
if (sslSessionContext instanceof OpenSslSessionContext) {
OpenSslSessionContext openSslSessionContext = (OpenSslSessionContext) sslSessionContext;
expect.that(openSslSessionContext.isSessionCacheEnabled()).isFalse();
}
}
@Test
public void createForClient_returnsEmptyResponse_error() throws Exception {
writer.setBehavior(FakeWriter.Behavior.EMPTY_RESPONSE);
S2AConnectionException expected =
assertThrows(
S2AConnectionException.class,
() ->
SslContextFactory.createForClient(
stub, FAKE_TARGET_NAME, /* localIdentity= */ Optional.empty()));
assertThat(expected)
.hasMessageThat()
.contains("Response from S2A server does NOT contain ClientTlsConfiguration.");
}
@Test
public void createForClient_returnsErrorStatus_error() throws Exception {
writer.setBehavior(FakeWriter.Behavior.ERROR_STATUS);
S2AConnectionException expected =
assertThrows(
S2AConnectionException.class,
() ->
SslContextFactory.createForClient(
stub, FAKE_TARGET_NAME, /* localIdentity= */ Optional.empty()));
assertThat(expected).hasMessageThat().contains("Intended ERROR Status from FakeWriter.");
}
@Test
public void createForClient_getsErrorFromServer_throwsError() throws Exception {
writer.sendIoError();
GeneralSecurityException expected =
assertThrows(
GeneralSecurityException.class,
() ->
SslContextFactory.createForClient(
stub, FAKE_TARGET_NAME, /* localIdentity= */ Optional.empty()));
assertThat(expected)
.hasMessageThat()
.contains("Failed to get client TLS configuration from S2A.");
}
@Test
public void createForClient_getsBadTlsVersionsFromServer_throwsError() throws Exception {
writer.setBehavior(FakeWriter.Behavior.BAD_TLS_VERSION_RESPONSE);
S2AConnectionException expected =
assertThrows(
S2AConnectionException.class,
() ->
SslContextFactory.createForClient(
stub, FAKE_TARGET_NAME, /* localIdentity= */ Optional.empty()));
assertThat(expected)
.hasMessageThat()
.contains("Set of TLS versions received from S2A server is empty or not supported.");
}
@Test
public void createForClient_nullStub_throwsError() throws Exception {
writer.sendUnexpectedResponse();
NullPointerException expected =
assertThrows(
NullPointerException.class,
() ->
SslContextFactory.createForClient(
/* stub= */ null, FAKE_TARGET_NAME, /* localIdentity= */ Optional.empty()));
assertThat(expected).hasMessageThat().isEqualTo("stub should not be null.");
}
@Test
public void createForClient_nullTargetName_throwsError() throws Exception {
writer.sendUnexpectedResponse();
NullPointerException expected =
assertThrows(
NullPointerException.class,
() ->
SslContextFactory.createForClient(
stub, /* targetName= */ null, /* localIdentity= */ Optional.empty()));
assertThat(expected)
.hasMessageThat()
.isEqualTo("targetName should not be null on client side.");
}
} | SslContextFactoryTest |
java | apache__camel | components/camel-google/camel-google-calendar/src/generated/java/org/apache/camel/component/google/calendar/CalendarAclEndpointConfigurationConfigurer.java | {
"start": 752,
"end": 10453
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("AccessToken", java.lang.String.class);
map.put("ApiName", org.apache.camel.component.google.calendar.internal.GoogleCalendarApiName.class);
map.put("ApplicationName", java.lang.String.class);
map.put("CalendarId", java.lang.String.class);
map.put("ClientId", java.lang.String.class);
map.put("ClientSecret", java.lang.String.class);
map.put("Content", com.google.api.services.calendar.model.AclRule.class);
map.put("ContentChannel", com.google.api.services.calendar.model.Channel.class);
map.put("Delegate", java.lang.String.class);
map.put("EmailAddress", java.lang.String.class);
map.put("MaxResults", java.lang.Integer.class);
map.put("MethodName", java.lang.String.class);
map.put("P12FileName", java.lang.String.class);
map.put("PageToken", java.lang.String.class);
map.put("RefreshToken", java.lang.String.class);
map.put("RuleId", java.lang.String.class);
map.put("Scopes", java.lang.String.class);
map.put("SendNotifications", java.lang.Boolean.class);
map.put("ServiceAccountKey", java.lang.String.class);
map.put("ShowDeleted", java.lang.Boolean.class);
map.put("SyncToken", java.lang.String.class);
map.put("User", java.lang.String.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.google.calendar.CalendarAclEndpointConfiguration target = (org.apache.camel.component.google.calendar.CalendarAclEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": target.setAccessToken(property(camelContext, java.lang.String.class, value)); return true;
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.google.calendar.internal.GoogleCalendarApiName.class, value)); return true;
case "applicationname":
case "applicationName": target.setApplicationName(property(camelContext, java.lang.String.class, value)); return true;
case "calendarid":
case "calendarId": target.setCalendarId(property(camelContext, java.lang.String.class, value)); return true;
case "clientid":
case "clientId": target.setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "clientsecret":
case "clientSecret": target.setClientSecret(property(camelContext, java.lang.String.class, value)); return true;
case "content": target.setContent(property(camelContext, com.google.api.services.calendar.model.AclRule.class, value)); return true;
case "contentchannel":
case "contentChannel": target.setContentChannel(property(camelContext, com.google.api.services.calendar.model.Channel.class, value)); return true;
case "delegate": target.setDelegate(property(camelContext, java.lang.String.class, value)); return true;
case "emailaddress":
case "emailAddress": target.setEmailAddress(property(camelContext, java.lang.String.class, value)); return true;
case "maxresults":
case "maxResults": target.setMaxResults(property(camelContext, java.lang.Integer.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "p12filename":
case "p12FileName": target.setP12FileName(property(camelContext, java.lang.String.class, value)); return true;
case "pagetoken":
case "pageToken": target.setPageToken(property(camelContext, java.lang.String.class, value)); return true;
case "refreshtoken":
case "refreshToken": target.setRefreshToken(property(camelContext, java.lang.String.class, value)); return true;
case "ruleid":
case "ruleId": target.setRuleId(property(camelContext, java.lang.String.class, value)); return true;
case "scopes": target.setScopes(property(camelContext, java.lang.String.class, value)); return true;
case "sendnotifications":
case "sendNotifications": target.setSendNotifications(property(camelContext, java.lang.Boolean.class, value)); return true;
case "serviceaccountkey":
case "serviceAccountKey": target.setServiceAccountKey(property(camelContext, java.lang.String.class, value)); return true;
case "showdeleted":
case "showDeleted": target.setShowDeleted(property(camelContext, java.lang.Boolean.class, value)); return true;
case "synctoken":
case "syncToken": target.setSyncToken(property(camelContext, java.lang.String.class, value)); return true;
case "user": target.setUser(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return java.lang.String.class;
case "apiname":
case "apiName": return org.apache.camel.component.google.calendar.internal.GoogleCalendarApiName.class;
case "applicationname":
case "applicationName": return java.lang.String.class;
case "calendarid":
case "calendarId": return java.lang.String.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "clientsecret":
case "clientSecret": return java.lang.String.class;
case "content": return com.google.api.services.calendar.model.AclRule.class;
case "contentchannel":
case "contentChannel": return com.google.api.services.calendar.model.Channel.class;
case "delegate": return java.lang.String.class;
case "emailaddress":
case "emailAddress": return java.lang.String.class;
case "maxresults":
case "maxResults": return java.lang.Integer.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "p12filename":
case "p12FileName": return java.lang.String.class;
case "pagetoken":
case "pageToken": return java.lang.String.class;
case "refreshtoken":
case "refreshToken": return java.lang.String.class;
case "ruleid":
case "ruleId": return java.lang.String.class;
case "scopes": return java.lang.String.class;
case "sendnotifications":
case "sendNotifications": return java.lang.Boolean.class;
case "serviceaccountkey":
case "serviceAccountKey": return java.lang.String.class;
case "showdeleted":
case "showDeleted": return java.lang.Boolean.class;
case "synctoken":
case "syncToken": return java.lang.String.class;
case "user": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.google.calendar.CalendarAclEndpointConfiguration target = (org.apache.camel.component.google.calendar.CalendarAclEndpointConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return target.getAccessToken();
case "apiname":
case "apiName": return target.getApiName();
case "applicationname":
case "applicationName": return target.getApplicationName();
case "calendarid":
case "calendarId": return target.getCalendarId();
case "clientid":
case "clientId": return target.getClientId();
case "clientsecret":
case "clientSecret": return target.getClientSecret();
case "content": return target.getContent();
case "contentchannel":
case "contentChannel": return target.getContentChannel();
case "delegate": return target.getDelegate();
case "emailaddress":
case "emailAddress": return target.getEmailAddress();
case "maxresults":
case "maxResults": return target.getMaxResults();
case "methodname":
case "methodName": return target.getMethodName();
case "p12filename":
case "p12FileName": return target.getP12FileName();
case "pagetoken":
case "pageToken": return target.getPageToken();
case "refreshtoken":
case "refreshToken": return target.getRefreshToken();
case "ruleid":
case "ruleId": return target.getRuleId();
case "scopes": return target.getScopes();
case "sendnotifications":
case "sendNotifications": return target.getSendNotifications();
case "serviceaccountkey":
case "serviceAccountKey": return target.getServiceAccountKey();
case "showdeleted":
case "showDeleted": return target.getShowDeleted();
case "synctoken":
case "syncToken": return target.getSyncToken();
case "user": return target.getUser();
default: return null;
}
}
}
| CalendarAclEndpointConfigurationConfigurer |
java | apache__camel | components/camel-aws/camel-aws2-kms/src/main/java/org/apache/camel/component/aws2/kms/client/impl/KMS2ClientOptimizedImpl.java | {
"start": 1775,
"end": 4283
} | class ____ implements KMS2InternalClient {
private static final Logger LOG = LoggerFactory.getLogger(KMS2ClientOptimizedImpl.class);
private KMS2Configuration configuration;
/**
* Constructor that uses the config file.
*/
public KMS2ClientOptimizedImpl(KMS2Configuration configuration) {
LOG.trace("Creating an AWS KMS client for an ec2 instance with IAM temporary credentials (normal for ec2s).");
this.configuration = configuration;
}
/**
* Getting the KMS aws client that is used.
*
* @return KMS Client.
*/
@Override
public KmsClient getKmsClient() {
KmsClient client = null;
KmsClientBuilder clientBuilder = KmsClient.builder();
ProxyConfiguration.Builder proxyConfig = null;
ApacheHttpClient.Builder httpClientBuilder = null;
if (ObjectHelper.isNotEmpty(configuration.getProxyHost()) && ObjectHelper.isNotEmpty(configuration.getProxyPort())) {
proxyConfig = ProxyConfiguration.builder();
URI proxyEndpoint = URI.create(configuration.getProxyProtocol() + "://" + configuration.getProxyHost() + ":"
+ configuration.getProxyPort());
proxyConfig.endpoint(proxyEndpoint);
httpClientBuilder = ApacheHttpClient.builder().proxyConfiguration(proxyConfig.build());
clientBuilder = clientBuilder.httpClientBuilder(httpClientBuilder);
}
if (ObjectHelper.isNotEmpty(configuration.getRegion())) {
clientBuilder = clientBuilder.region(Region.of(configuration.getRegion()));
}
if (configuration.isOverrideEndpoint()) {
clientBuilder.endpointOverride(URI.create(configuration.getUriEndpointOverride()));
}
if (configuration.isTrustAllCertificates()) {
if (httpClientBuilder == null) {
httpClientBuilder = ApacheHttpClient.builder();
}
SdkHttpClient ahc = httpClientBuilder.buildWithDefaults(AttributeMap
.builder()
.put(
SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES,
Boolean.TRUE)
.build());
// set created http client to use instead of builder
clientBuilder.httpClient(ahc);
clientBuilder.httpClientBuilder(null);
}
client = clientBuilder.build();
return client;
}
}
| KMS2ClientOptimizedImpl |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/support/DefaultBootstrapContext.java | {
"start": 1146,
"end": 1440
} | class ____ implements BootstrapContext {
private final Class<?> testClass;
private final CacheAwareContextLoaderDelegate cacheAwareContextLoaderDelegate;
/**
* Construct a new {@code DefaultBootstrapContext} from the supplied arguments.
* @param testClass the test | DefaultBootstrapContext |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java | {
"start": 8596,
"end": 8647
} | class ____ a numeric rule.
*/
private | defining |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/JpqlQueryIntrospector.java | {
"start": 856,
"end": 2830
} | class ____ extends JpqlBaseVisitor<Void> implements ParsedQueryIntrospector<QueryInformation> {
private final JpqlQueryRenderer renderer = new JpqlQueryRenderer();
private final QueryInformationHolder introspection = new QueryInformationHolder();
@Override
public QueryInformation getParsedQueryInformation() {
return new QueryInformation(introspection);
}
@Override
public Void visitSelectQuery(JpqlParser.SelectQueryContext ctx) {
introspection.setStatementType(QueryInformation.StatementType.SELECT);
return super.visitSelectQuery(ctx);
}
@Override
public Void visitFromQuery(JpqlParser.FromQueryContext ctx) {
introspection.setStatementType(QueryInformation.StatementType.SELECT);
return super.visitFromQuery(ctx);
}
@Override
public Void visitUpdate_statement(JpqlParser.Update_statementContext ctx) {
introspection.setStatementType(QueryInformation.StatementType.UPDATE);
return super.visitUpdate_statement(ctx);
}
@Override
public Void visitDelete_statement(JpqlParser.Delete_statementContext ctx) {
introspection.setStatementType(QueryInformation.StatementType.DELETE);
return super.visitDelete_statement(ctx);
}
@Override
public Void visitSelect_clause(JpqlParser.Select_clauseContext ctx) {
introspection.captureProjection(ctx.select_item(), renderer::visitSelect_item);
return super.visitSelect_clause(ctx);
}
@Override
public Void visitRange_variable_declaration(JpqlParser.Range_variable_declarationContext ctx) {
if (ctx.identification_variable() != null && !JpqlQueryRenderer.isSubquery(ctx)
&& !JpqlQueryRenderer.isSetQuery(ctx)) {
introspection.capturePrimaryAlias(ctx.identification_variable().getText());
}
return super.visitRange_variable_declaration(ctx);
}
@Override
public Void visitConstructor_expression(JpqlParser.Constructor_expressionContext ctx) {
introspection.constructorExpressionPresent();
return super.visitConstructor_expression(ctx);
}
}
| JpqlQueryIntrospector |
java | redisson__redisson | redisson-spring-data/redisson-spring-data-16/src/test/java/org/redisson/BaseTest.java | {
"start": 223,
"end": 2494
} | class ____ {
protected RedissonClient redisson;
protected static RedissonClient defaultRedisson;
@BeforeClass
public static void beforeClass() throws IOException, InterruptedException {
RedisRunner.startDefaultRedisServerInstance();
defaultRedisson = createInstance();
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
defaultRedisson.shutdown();
try {
RedisRunner.shutDownDefaultRedisServerInstance();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
});
}
@Before
public void before() throws IOException, InterruptedException {
if (redisson == null) {
redisson = defaultRedisson;
}
if (flushBetweenTests()) {
redisson.getKeys().flushall();
}
}
@After
public void after() throws InterruptedException {
}
public static Config createConfig() {
// String redisAddress = System.getProperty("redisAddress");
// if (redisAddress == null) {
// redisAddress = "127.0.0.1:6379";
// }
Config config = new Config();
// config.setCodec(new MsgPackJacksonCodec());
// config.useSentinelServers().setMasterName("mymaster").addSentinelAddress("127.0.0.1:26379", "127.0.0.1:26389");
// config.useClusterServers().addNodeAddress("127.0.0.1:7004", "127.0.0.1:7001", "127.0.0.1:7000");
config.useSingleServer()
.setAddress(RedisRunner.getDefaultRedisServerBindAddressAndPort());
// .setPassword("mypass1");
// config.useMasterSlaveConnection()
// .setMasterAddress("127.0.0.1:6379")
// .addSlaveAddress("127.0.0.1:6399")
// .addSlaveAddress("127.0.0.1:6389");
return config;
}
public static RedissonClient createInstance() {
Config config = createConfig();
return Redisson.create(config);
}
protected boolean flushBetweenTests() {
return true;
}
}
| BaseTest |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java | {
"start": 46055,
"end": 119660
} | enum ____ {
/**
* We're accessing the container for a pure read operation, e.g. read a
* file.
*/
PureRead,
/**
* We're accessing the container purely to write something, e.g. write a
* file.
*/
PureWrite,
/**
* We're accessing the container to read something then write, e.g. rename a
* file.
*/
ReadThenWrite
}
/**
* Trims a suffix/prefix from the given string. For example if
* s is given as "/xy" and toTrim is "/", this method returns "xy"
*/
private static String trim(String s, String toTrim) {
return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim),
toTrim);
}
/**
* Checks if the given rawDir belongs to this account/container, and
* if so returns the canonicalized path for it. Otherwise return null.
*/
private String verifyAndConvertToStandardFormat(String rawDir) throws URISyntaxException {
URI asUri = new URI(rawDir);
if (asUri.getAuthority() == null
|| asUri.getAuthority().toLowerCase(Locale.ENGLISH).equalsIgnoreCase(
sessionUri.getAuthority().toLowerCase(Locale.ENGLISH))) {
// Applies to me.
return trim(asUri.getPath(), "/");
} else {
// Doen't apply to me.
return null;
}
}
/**
* Take a comma-separated list of directories from a configuration variable
* and transform it to a set of directories.
*/
private Set<String> getDirectorySet(final String configVar)
throws AzureException {
String[] rawDirs = sessionConfiguration.getStrings(configVar, new String[0]);
Set<String> directorySet = new HashSet<String>();
for (String currentDir : rawDirs) {
String myDir;
try {
myDir = verifyAndConvertToStandardFormat(currentDir.trim());
} catch (URISyntaxException ex) {
throw new AzureException(String.format(
"The directory %s specified in the configuration entry %s is not"
+ " a valid URI.",
currentDir, configVar));
}
if (myDir != null) {
directorySet.add(myDir);
}
}
return directorySet;
}
/**
* Checks if the given key in Azure Storage should be stored as a page
* blob instead of block blob.
*/
public boolean isPageBlobKey(String key) {
return isKeyForDirectorySet(key, pageBlobDirs);
}
/**
* Checks if the given key in Azure Storage should be stored as a block blobs
* with compaction enabled instead of normal block blob.
*
* @param key blob name
* @return true, if the file is in directory with block compaction enabled.
*/
public boolean isBlockBlobWithCompactionKey(String key) {
return isKeyForDirectorySet(key, blockBlobWithCompationDirs);
}
/**
* Checks if the given key in Azure storage should have synchronized
* atomic folder rename createNonRecursive implemented.
*/
@Override
public boolean isAtomicRenameKey(String key) {
return isKeyForDirectorySet(key, atomicRenameDirs);
}
public boolean isKeyForDirectorySet(String key, Set<String> dirSet) {
String defaultFS = FileSystem.getDefaultUri(sessionConfiguration).toString();
for (String dir : dirSet) {
if (dir.isEmpty()) {
// dir is root
return true;
}
if (matchAsteriskPattern(key, dir)) {
return true;
}
// Allow for blob directories with paths relative to the default file
// system.
//
try {
URI uriPageBlobDir = new URI(dir);
if (null == uriPageBlobDir.getAuthority()) {
// Concatenate the default file system prefix with the relative
// page blob directory path.
//
String dirWithPrefix = trim(defaultFS, "/") + "/" + dir;
if (matchAsteriskPattern(key, dirWithPrefix)) {
return true;
}
}
} catch (URISyntaxException e) {
LOG.info("URI syntax error creating URI for {}", dir);
}
}
return false;
}
private boolean matchAsteriskPattern(String pathName, String pattern) {
if (pathName == null || pathName.length() == 0) {
return false;
}
int pathIndex = 0;
int patternIndex = 0;
while (pathIndex < pathName.length() && patternIndex < pattern.length()) {
char charToMatch = pattern.charAt(patternIndex);
// normal char:
if (charToMatch != ASTERISK_SYMBOL) {
if (charToMatch != pathName.charAt(pathIndex)) {
return false;
}
pathIndex++;
patternIndex++;
continue;
}
// ASTERISK_SYMBOL
// 1. * is used in path name: *a/b,a*/b, a/*b, a/b*
if (patternIndex > 0 && pattern.charAt(patternIndex - 1) != Path.SEPARATOR_CHAR
|| patternIndex + 1 < pattern.length() && pattern.charAt(patternIndex + 1) != Path.SEPARATOR_CHAR) {
if (ASTERISK_SYMBOL != pathName.charAt(pathIndex)) {
return false;
}
pathIndex++;
patternIndex++;
continue;
}
// 2. * is used as wildcard: */a, a/*/b, a/*
patternIndex++;
// find next path separator
while (pathIndex < pathName.length() && pathName.charAt(pathIndex) != Path.SEPARATOR_CHAR) {
pathIndex++;
}
}
// Ensure it is not a file/dir which shares same prefix as pattern
// Eg: pattern: /A/B, pathName: /A/BBB should not match
return patternIndex == pattern.length()
&& (pathIndex == pathName.length() || pathName.charAt(pathIndex) == Path.SEPARATOR_CHAR);
}
/**
* Returns the file block size. This is a fake value used for integration
* of the Azure store with Hadoop.
*/
@Override
public long getHadoopBlockSize() {
return hadoopBlockSize;
}
/**
* This should be called from any method that does any modifications to the
* underlying container: it makes sure to put the WASB current version in the
* container's metadata if it's not already there.
*/
private ContainerState checkContainer(ContainerAccessType accessType)
throws StorageException, AzureException {
synchronized (containerStateLock) {
if (isOkContainerState(accessType)) {
return currentKnownContainerState;
}
if (currentKnownContainerState == ContainerState.ExistsAtWrongVersion) {
String containerVersion = retrieveVersionAttribute(container);
throw wrongVersionException(containerVersion);
}
// This means I didn't check it before or it didn't exist or
// we need to stamp the version. Since things may have changed by
// other machines since then, do the check again and don't depend
// on past information.
// Sanity check: we don't expect this at this point.
if (currentKnownContainerState == ContainerState.ExistsAtRightVersion) {
throw new AssertionError("Unexpected state: "
+ currentKnownContainerState);
}
// Download the attributes - doubles as an existence check with just
// one service call
try {
container.downloadAttributes(getInstrumentedContext());
currentKnownContainerState = ContainerState.Unknown;
} catch (StorageException ex) {
if (StorageErrorCodeStrings.CONTAINER_NOT_FOUND.toString()
.equals(ex.getErrorCode())) {
currentKnownContainerState = ContainerState.DoesntExist;
} else {
throw ex;
}
}
if (currentKnownContainerState == ContainerState.DoesntExist) {
// If the container doesn't exist and we intend to write to it,
// create it now.
if (needToCreateContainer(accessType)) {
storeVersionAttribute(container);
container.create(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// The container exists, check the version.
String containerVersion = retrieveVersionAttribute(container);
if (containerVersion != null) {
if (containerVersion.equals(FIRST_WASB_VERSION)) {
// It's the version from when WASB was called ASV, just
// fix the version attribute if needed and proceed.
// We should be good otherwise.
if (needToStampVersion(accessType)) {
storeVersionAttribute(container);
container.uploadMetadata(getInstrumentedContext());
}
} else if (!containerVersion.equals(CURRENT_WASB_VERSION)) {
// Don't know this version - throw.
currentKnownContainerState = ContainerState.ExistsAtWrongVersion;
throw wrongVersionException(containerVersion);
} else {
// It's our correct version.
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
} else {
// No version info exists.
currentKnownContainerState = ContainerState.ExistsNoVersion;
if (needToStampVersion(accessType)) {
// Need to stamp the version
storeVersionAttribute(container);
container.uploadMetadata(getInstrumentedContext());
currentKnownContainerState = ContainerState.ExistsAtRightVersion;
}
}
}
return currentKnownContainerState;
}
}
private AzureException wrongVersionException(String containerVersion) {
return new AzureException("The container " + container.getName()
+ " is at an unsupported version: " + containerVersion
+ ". Current supported version: " + FIRST_WASB_VERSION);
}
private boolean needToStampVersion(ContainerAccessType accessType) {
// We need to stamp the version on the container any time we write to
// it and we have the correct credentials to be able to write container
// metadata.
return accessType != ContainerAccessType.PureRead
&& canCreateOrModifyContainer;
}
private static boolean needToCreateContainer(ContainerAccessType accessType) {
// We need to pro-actively create the container (if it doesn't exist) if
// we're doing a pure write. No need to create it for pure read or read-
// then-write access.
return accessType == ContainerAccessType.PureWrite;
}
// Determines whether we have to pull the container information again
// or we can work based off what we already have.
private boolean isOkContainerState(ContainerAccessType accessType) {
switch (currentKnownContainerState) {
case Unknown:
// When using SAS, we can't discover container attributes
// so just live with Unknown state and fail later if it
// doesn't exist.
return connectingUsingSAS;
case DoesntExist:
return false; // the container could have been created
case ExistsAtRightVersion:
return true; // fine to optimize
case ExistsAtWrongVersion:
return false;
case ExistsNoVersion:
// If there's no version, it's OK if we don't need to stamp the version
// or we can't anyway even if we wanted to.
return !needToStampVersion(accessType);
default:
throw new AssertionError("Unknown access type: " + accessType);
}
}
private boolean getUseTransactionalContentMD5() {
return sessionConfiguration.getBoolean(KEY_CHECK_BLOCK_MD5, true);
}
private BlobRequestOptions getUploadOptions() {
BlobRequestOptions options = new BlobRequestOptions();
options.setStoreBlobContentMD5(sessionConfiguration.getBoolean(
KEY_STORE_BLOB_MD5, false));
options.setUseTransactionalContentMD5(getUseTransactionalContentMD5());
options.setConcurrentRequestCount(concurrentWrites);
options.setRetryPolicyFactory(new RetryExponentialRetry(minBackoff,
deltaBackoff, maxBackoff, maxRetries));
return options;
}
private BlobRequestOptions getDownloadOptions() {
BlobRequestOptions options = new BlobRequestOptions();
options.setRetryPolicyFactory(
new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
options.setUseTransactionalContentMD5(getUseTransactionalContentMD5());
return options;
}
@Override
public DataOutputStream storefile(String keyEncoded,
PermissionStatus permissionStatus,
String key)
throws AzureException {
try {
// Check if a session exists, if not create a session with the
// Azure storage server.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AzureException(errMsg);
}
// Check if there is an authenticated account associated with the
// file this instance of the WASB file system. If not the file system
// has not been authenticated and all access is anonymous.
if (!isAuthenticatedAccess()) {
// Preemptively raise an exception indicating no uploads are
// allowed to anonymous accounts.
throw new AzureException(new IOException(
"Uploads to public accounts using anonymous "
+ "access is prohibited."));
}
checkContainer(ContainerAccessType.PureWrite);
/**
* Note: Windows Azure Blob Storage does not allow the creation of arbitrary directory
* paths under the default $root directory. This is by design to eliminate
* ambiguity in specifying a implicit blob address. A blob in the $root conatiner
* cannot include a / in its name and must be careful not to include a trailing
* '/' when referencing blobs in the $root container.
* A '/; in the $root container permits ambiguous blob names as in the following
* example involving two containers $root and mycontainer:
* http://myaccount.blob.core.windows.net/$root
* http://myaccount.blob.core.windows.net/mycontainer
* If the URL "mycontainer/somefile.txt were allowed in $root then the URL:
* http://myaccount.blob.core.windows.net/mycontainer/myblob.txt
* could mean either:
* (1) container=mycontainer; blob=myblob.txt
* (2) container=$root; blob=mycontainer/myblob.txt
*
* To avoid this type of ambiguity the Azure blob storage prevents
* arbitrary path under $root. For a simple and more consistent user
* experience it was decided to eliminate the opportunity for creating
* such paths by making the $root container read-only under WASB.
*/
// Check that no attempt is made to write to blobs on default
// $root containers.
if (AZURE_ROOT_CONTAINER.equals(getContainerFromAuthority(sessionUri))) {
// Azure containers are restricted to non-root containers.
final String errMsg = String.format(
"Writes to '%s' container for URI '%s' are prohibited, "
+ "only updates on non-root containers permitted.",
AZURE_ROOT_CONTAINER, sessionUri.toString());
throw new AzureException(errMsg);
}
// Get the blob reference from the store's container and
// return it.
CloudBlobWrapper blob = getBlobReference(keyEncoded);
storePermissionStatus(blob, permissionStatus);
// Create the output stream for the Azure blob.
//
OutputStream outputStream;
if (isBlockBlobWithCompactionKey(key)) {
BlockBlobAppendStream blockBlobOutputStream = new BlockBlobAppendStream(
(CloudBlockBlobWrapper) blob,
keyEncoded,
this.uploadBlockSizeBytes,
true,
getInstrumentedContext());
outputStream = blockBlobOutputStream;
} else {
outputStream = openOutputStream(blob);
}
DataOutputStream dataOutStream = new SyncableDataOutputStream(outputStream);
return dataOutStream;
} catch (Exception e) {
// Caught exception while attempting to open the blob output stream.
// Re-throw as an Azure storage exception.
throw new AzureException(e);
}
}
/**
* Opens a new output stream to the given blob (page or block blob)
* to populate it from scratch with data.
*/
private OutputStream openOutputStream(final CloudBlobWrapper blob)
throws StorageException {
if (blob instanceof CloudPageBlobWrapper){
return new PageBlobOutputStream(
(CloudPageBlobWrapper) blob, getInstrumentedContext(), sessionConfiguration);
} else {
// Handle both ClouldBlockBlobWrapperImpl and (only for the test code path)
// MockCloudBlockBlobWrapper.
return ((CloudBlockBlobWrapper) blob).openOutputStream(getUploadOptions(),
getInstrumentedContext());
}
}
/**
* Opens a new input stream for the given blob (page or block blob)
* to read its data.
*/
private InputStream openInputStream(CloudBlobWrapper blob,
Optional<Configuration> options) throws StorageException, IOException {
if (blob instanceof CloudBlockBlobWrapper) {
LOG.debug("Using stream seek algorithm {}", inputStreamVersion);
switch(inputStreamVersion) {
case 1:
return blob.openInputStream(getDownloadOptions(),
getInstrumentedContext(isConcurrentOOBAppendAllowed()));
case 2:
boolean bufferedPreadDisabled = options.map(c -> c
.getBoolean(FS_AZURE_BLOCK_BLOB_BUFFERED_PREAD_DISABLE, false))
.orElse(false);
return new BlockBlobInputStream((CloudBlockBlobWrapper) blob,
getDownloadOptions(),
getInstrumentedContext(isConcurrentOOBAppendAllowed()),
bufferedPreadDisabled);
default:
throw new IOException("Unknown seek algorithm: " + inputStreamVersion);
}
} else {
return new PageBlobInputStream(
(CloudPageBlobWrapper) blob, getInstrumentedContext(
isConcurrentOOBAppendAllowed()));
}
}
/**
* Default permission to use when no permission metadata is found.
*
* @return The default permission to use.
*/
private static PermissionStatus defaultPermissionNoBlobMetadata() {
return new PermissionStatus("", "", FsPermission.getDefault());
}
private static void storeMetadataAttribute(CloudBlobWrapper blob,
String key, String value) {
HashMap<String, String> metadata = blob.getMetadata();
if (null == metadata) {
metadata = new HashMap<String, String>();
}
metadata.put(key, value);
blob.setMetadata(metadata);
}
private String getMetadataAttribute(HashMap<String, String> metadata,
String... keyAlternatives) {
if (null == metadata) {
return null;
}
for (String key : keyAlternatives) {
if (metadataKeyCaseSensitive) {
if (metadata.containsKey(key)) {
return metadata.get(key);
}
} else {
// See HADOOP-17643 for details on why this case insensitive metadata
// checks been added
for (Entry<String, String> entry : metadata.entrySet()) {
if (key.equalsIgnoreCase(entry.getKey())) {
return entry.getValue();
}
}
}
}
return null;
}
private static void removeMetadataAttribute(CloudBlobWrapper blob,
String key) {
HashMap<String, String> metadata = blob.getMetadata();
if (metadata != null) {
metadata.remove(key);
blob.setMetadata(metadata);
}
}
private static void storePermissionStatus(CloudBlobWrapper blob,
PermissionStatus permissionStatus) {
storeMetadataAttribute(blob, PERMISSION_METADATA_KEY,
PERMISSION_JSON_SERIALIZER.toJSON(permissionStatus));
// Remove the old metadata key if present
removeMetadataAttribute(blob, OLD_PERMISSION_METADATA_KEY);
}
private PermissionStatus getPermissionStatus(CloudBlobWrapper blob) {
String permissionMetadataValue = getMetadataAttribute(blob.getMetadata(),
PERMISSION_METADATA_KEY, OLD_PERMISSION_METADATA_KEY);
if (permissionMetadataValue != null) {
return PermissionStatusJsonSerializer.fromJSONString(
permissionMetadataValue);
} else {
return defaultPermissionNoBlobMetadata();
}
}
private static void storeFolderAttribute(CloudBlobWrapper blob) {
storeMetadataAttribute(blob, IS_FOLDER_METADATA_KEY, "true");
// Remove the old metadata key if present
removeMetadataAttribute(blob, OLD_IS_FOLDER_METADATA_KEY);
}
private static String encodeMetadataAttribute(String value) throws UnsupportedEncodingException {
// We have to URL encode the attribute as it could
// have URI special characters which unless encoded will result
// in 403 errors from the server. This is due to metadata properties
// being sent in the HTTP header of the request which is in turn used
// on the server side to authorize the request.
return value == null ? null : URLEncoder.encode(value, METADATA_ENCODING.name());
}
private static String decodeMetadataAttribute(String encoded) throws UnsupportedEncodingException {
return encoded == null ? null : URLDecoder.decode(encoded, METADATA_ENCODING.name());
}
private static String ensureValidAttributeName(String attribute) {
// Attribute names must be valid C# identifiers so we have to
// convert the namespace dots (e.g. "user.something") in the
// attribute names. Using underscores here to be consistent with
// the constant metadata keys defined earlier in the file
return attribute.replace('.', '_');
}
private static void storeLinkAttribute(CloudBlobWrapper blob,
String linkTarget) throws UnsupportedEncodingException {
String encodedLinkTarget = encodeMetadataAttribute(linkTarget);
storeMetadataAttribute(blob,
LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY,
encodedLinkTarget);
// Remove the old metadata key if present
removeMetadataAttribute(blob,
OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
}
private String getLinkAttributeValue(CloudBlobWrapper blob)
throws UnsupportedEncodingException {
String encodedLinkTarget = getMetadataAttribute(blob.getMetadata(),
LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY,
OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
return decodeMetadataAttribute(encodedLinkTarget);
}
private boolean retrieveFolderAttribute(CloudBlobWrapper blob) {
HashMap<String, String> metadata = blob.getMetadata();
if (null != metadata) {
if (metadataKeyCaseSensitive) {
return metadata.containsKey(IS_FOLDER_METADATA_KEY)
|| metadata.containsKey(OLD_IS_FOLDER_METADATA_KEY);
} else {
// See HADOOP-17643 for details on why this case insensitive metadata
// checks been added
for (String key : metadata.keySet()) {
if (key.equalsIgnoreCase(IS_FOLDER_METADATA_KEY)
|| key.equalsIgnoreCase(OLD_IS_FOLDER_METADATA_KEY)) {
return true;
}
}
}
}
return false;
}
private static void storeVersionAttribute(CloudBlobContainerWrapper container) {
HashMap<String, String> metadata = container.getMetadata();
if (null == metadata) {
metadata = new HashMap<String, String>();
}
metadata.put(VERSION_METADATA_KEY, CURRENT_WASB_VERSION);
if (metadata.containsKey(OLD_VERSION_METADATA_KEY)) {
metadata.remove(OLD_VERSION_METADATA_KEY);
}
container.setMetadata(metadata);
}
private String retrieveVersionAttribute(CloudBlobContainerWrapper container) {
return getMetadataAttribute(container.getMetadata(), VERSION_METADATA_KEY,
OLD_VERSION_METADATA_KEY);
}
@Override
public void storeEmptyFolder(String key, PermissionStatus permissionStatus)
throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
// Check if there is an authenticated account associated with the file
// this instance of the WASB file system. If not the file system has not
// been authenticated and all access is anonymous.
if (!isAuthenticatedAccess()) {
// Preemptively raise an exception indicating no uploads are
// allowed to anonymous accounts.
throw new AzureException(
"Uploads to to public accounts using anonymous access is prohibited.");
}
try {
checkContainer(ContainerAccessType.PureWrite);
CloudBlobWrapper blob = getBlobReference(key);
storePermissionStatus(blob, permissionStatus);
storeFolderAttribute(blob);
openOutputStream(blob).close();
} catch (StorageException e) {
// Caught exception while attempting upload. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
} catch (URISyntaxException e) {
throw new AzureException(e);
} catch (IOException e) {
Throwable t = e.getCause();
if (t instanceof StorageException) {
StorageException se = (StorageException) t;
// If we got this exception, the blob should have already been created
if (!"LeaseIdMissing".equals(se.getErrorCode())) {
throw new AzureException(e);
}
} else {
throw new AzureException(e);
}
}
}
/**
* Stores an empty blob that's linking to the temporary file where're we're
* uploading the initial data.
*/
@Override
public void storeEmptyLinkFile(String key, String tempBlobKey,
PermissionStatus permissionStatus) throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
// Check if there is an authenticated account associated with the file
// this instance of the WASB file system. If not the file system has not
// been authenticated and all access is anonymous.
if (!isAuthenticatedAccess()) {
// Preemptively raise an exception indicating no uploads are
// allowed to anonymous accounts.
throw new AzureException(
"Uploads to to public accounts using anonymous access is prohibited.");
}
try {
checkContainer(ContainerAccessType.PureWrite);
CloudBlobWrapper blob = getBlobReference(key);
storePermissionStatus(blob, permissionStatus);
storeLinkAttribute(blob, tempBlobKey);
openOutputStream(blob).close();
} catch (Exception e) {
// Caught exception while attempting upload. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
}
/**
* If the blob with the given key exists and has a link in its metadata to a
* temporary file (see storeEmptyLinkFile), this method returns the key to
* that temporary file. Otherwise, returns null.
*/
@Override
public String getLinkInFileMetadata(String key) throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
try {
checkContainer(ContainerAccessType.PureRead);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
return getLinkAttributeValue(blob);
} catch (Exception e) {
// Caught exception while attempting download. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
}
/**
* Private method to check for authenticated access.
*
* @ returns boolean -- true if access is credentialed and authenticated and
* false otherwise.
*/
private boolean isAuthenticatedAccess() throws AzureException {
if (isAnonymousCredentials) {
// Access to this storage account is unauthenticated.
return false;
}
// Access is authenticated.
return true;
}
/**
* This private method uses the root directory or the original container to
* list blobs under the directory or container depending on whether the
* original file system object was constructed with a short- or long-form URI.
* If the root directory is non-null the URI in the file constructor was in
* the long form.
*
* @param includeMetadata
* if set, the listed items will have their metadata populated
* already.
* @param useFlatBlobListing
* if set the list is flat, otherwise it is hierarchical.
*
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(boolean includeMetadata,
boolean useFlatBlobListing) throws StorageException, URISyntaxException {
return rootDirectory.listBlobs(
null,
useFlatBlobListing,
includeMetadata
? EnumSet.of(BlobListingDetails.METADATA)
: EnumSet.noneOf(BlobListingDetails.class),
null,
getInstrumentedContext());
}
/**
* This private method uses the root directory or the original container to
* list blobs under the directory or container given a specified prefix for
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. If the root directory is
* non-null the URI in the file constructor was in the long form.
*
* @param aPrefix
* : string name representing the prefix of containing blobs.
* @param includeMetadata
* if set, the listed items will have their metadata populated
* already.
* @param useFlatBlobListing
* if set the list is flat, otherwise it is hierarchical.
*
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean includeMetadata,
boolean useFlatBlobListing) throws StorageException, URISyntaxException {
Iterable<ListBlobItem> list = rootDirectory.listBlobs(aPrefix,
useFlatBlobListing,
includeMetadata
? EnumSet.of(BlobListingDetails.METADATA)
: EnumSet.noneOf(BlobListingDetails.class),
null,
getInstrumentedContext());
return list;
}
/**
* This private method uses the root directory or the original container to
* list blobs under the directory or container given a specified prefix for
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. It also uses the specified flat
* or hierarchical option, listing details options, request options, and
* operation context.
*
* @param aPrefix
* string name representing the prefix of containing blobs.
* @param useFlatBlobListing
* - the list is flat if true, or hierarchical otherwise.
* @param listingDetails
* - determine whether snapshots, metadata, committed/uncommitted
* data
* @param options
* - object specifying additional options for the request. null =
* default options
* @param opContext
* - context of the current operation
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean useFlatBlobListing,
EnumSet<BlobListingDetails> listingDetails, BlobRequestOptions options,
OperationContext opContext) throws StorageException, URISyntaxException {
CloudBlobDirectoryWrapper directory = this.container.getDirectoryReference(aPrefix);
return directory.listBlobs(
null,
useFlatBlobListing,
listingDetails,
options,
opContext);
}
/**
* This private method uses the root directory or the original container to
* get the block blob reference depending on whether the original file system
* object was constructed with a short- or long-form URI. If the root
* directory is non-null the URI in the file constructor was in the long form.
*
* @param aKey
* : a key used to query Azure for the block blob.
* @returns blob : a reference to the Azure block blob corresponding to the
* key.
* @throws URISyntaxException
*
*/
private CloudBlobWrapper getBlobReference(String aKey)
throws StorageException, URISyntaxException {
CloudBlobWrapper blob = null;
if (isPageBlobKey(aKey)) {
blob = this.container.getPageBlobReference(aKey);
} else {
blob = this.container.getBlockBlobReference(aKey);
blob.setStreamMinimumReadSizeInBytes(downloadBlockSizeBytes);
blob.setWriteBlockSizeInBytes(uploadBlockSizeBytes);
}
return blob;
}
/**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param keyUri
* - adjust this key to a path relative to the root directory
*
* @returns normKey
*/
private String normalizeKey(URI keyUri) {
String normKey;
// Strip the container name from the path and return the path
// relative to the root directory of the container.
int parts = isStorageEmulator ? 4 : 3;
normKey = keyUri.getPath().split("/", parts)[(parts - 1)];
// Return the fixed key.
return normKey;
}
/**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param blob
* - adjust the key to this blob to a path relative to the root
* directory
*
* @returns normKey
*/
private String normalizeKey(CloudBlobWrapper blob) {
return normalizeKey(blob.getUri());
}
/**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param directory
* - adjust the key to this directory to a path relative to the root
* directory
*
* @returns normKey
*/
private String normalizeKey(CloudBlobDirectoryWrapper directory) {
String dirKey = normalizeKey(directory.getUri());
// Strip the last delimiter
if (dirKey.endsWith(PATH_DELIMITER)) {
dirKey = dirKey.substring(0, dirKey.length() - 1);
}
return dirKey;
}
/**
* Default method to creates a new OperationContext for the Azure Storage
* operation that has listeners hooked to it that will update the metrics for
* this file system. This method does not bind to receive send request
* callbacks by default.
*
* @return The OperationContext object to use.
*/
private OperationContext getInstrumentedContext() {
// Default is to not bind to receive send callback events.
return getInstrumentedContext(false);
}
/**
* Creates a new OperationContext for the Azure Storage operation that has
* listeners hooked to it that will update the metrics for this file system.
*
* @param bindConcurrentOOBIo
* - bind to intercept send request call backs to handle OOB I/O.
*
* @return The OperationContext object to use.
*/
  private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {
    OperationContext operationContext = new OperationContext();
    // Set User-Agent so server-side request logs can attribute traffic to
    // this WASB client and its version.
    operationContext.getSendingRequestEventHandler().addListener(new StorageEvent<SendingRequestEvent>() {
      @Override
      public void eventOccurred(SendingRequestEvent eventArg) {
        HttpURLConnection connection = (HttpURLConnection) eventArg.getConnectionObject();
        String userAgentInfo = String.format(Utility.LOCALE_US, "WASB/%s (%s) %s",
            VersionInfo.getVersion(), userAgentId, BaseRequest.getUserAgent());
        connection.setRequestProperty(Constants.HeaderConstants.USER_AGENT, userAgentInfo);
      }
    });
    // The two throttling hooks are mutually exclusive; self-throttling wins
    // when both are enabled.
    if (selfThrottlingEnabled) {
      SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor,
          selfThrottlingWriteFactor);
    } else if (autoThrottlingEnabled) {
      ClientThrottlingIntercept.hook(operationContext);
    }
    if (bandwidthGaugeUpdater != null) {
      //bandwidthGaugeUpdater is null when we config to skip azure metrics
      ResponseReceivedMetricUpdater.hook(
          operationContext,
          instrumentation,
          bandwidthGaugeUpdater);
    }
    // Bind operation context to receive send request callbacks on this operation.
    // If reads concurrent to OOB writes are allowed, the interception will reset
    // the conditional header on all Azure blob storage read requests.
    if (bindConcurrentOOBIo) {
      SendRequestIntercept.bind(operationContext);
    }
    // Test-only seam: a test hook may wrap or replace the context.
    if (testHookOperationContext != null) {
      operationContext =
          testHookOperationContext.modifyOperationContext(operationContext);
    }
    ErrorMetricUpdater.hook(operationContext, instrumentation);
    // Return the operation context.
    return operationContext;
  }
  /**
   * Resolves the metadata for {@code key}: an explicit file or folder blob if
   * one exists, an implicit folder if other blobs live under the key, the
   * container root for "/", or null when nothing matches.
   *
   * @param key blob key to look up.
   * @return the file/folder metadata, or null if the key does not exist.
   * @throws IOException wrapped as {@link AzureException} on storage errors.
   */
  @Override
  public FileMetadata retrieveMetadata(String key) throws IOException {
    // Attempts to check status may occur before opening any streams so first,
    // check if a session exists, if not create a session with the Azure storage
    // server.
    if (null == storageInteractionLayer) {
      final String errMsg = String.format(
          "Storage session expected for URI '%s' but does not exist.",
          sessionUri);
      throw new AssertionError(errMsg);
    }
    LOG.debug("Retrieving metadata for {}", key);
    try {
      if (checkContainer(ContainerAccessType.PureRead) == ContainerState.DoesntExist) {
        // The container doesn't exist, so spare some service calls and just
        // return null now.
        return null;
      }
      // Handle the degenerate cases where the key does not exist or the
      // key is a container.
      if (key.equals("/")) {
        // The key refers to root directory of container.
        // Set the modification time for root to zero.
        return new FileMetadata(key, 0, defaultPermissionNoBlobMetadata(),
            BlobMaterialization.Implicit, hadoopBlockSize);
      }
      CloudBlobWrapper blob = getBlobReference(key);
      // Download attributes and return file metadata only if the blob
      // exists.
      if (null != blob && blob.exists(getInstrumentedContext())) {
        LOG.debug("Found {} as an explicit blob. Checking if it's a file or folder.", key);
        try {
          // The blob exists, so capture the metadata from the blob
          // properties.
          blob.downloadAttributes(getInstrumentedContext());
          BlobProperties properties = blob.getProperties();
          if (retrieveFolderAttribute(blob)) {
            LOG.debug("{} is a folder blob.", key);
            return new FileMetadata(key, properties.getLastModified().getTime(),
                getPermissionStatus(blob), BlobMaterialization.Explicit, hadoopBlockSize);
          } else {
            LOG.debug("{} is a normal blob.", key);
            return new FileMetadata(
                key, // Always return denormalized key with metadata.
                getDataLength(blob, properties),
                properties.getLastModified().getTime(),
                getPermissionStatus(blob), hadoopBlockSize);
          }
        } catch(StorageException e){
          // A not-found here means the blob vanished between exists() and
          // downloadAttributes(); fall through to the implicit-folder probe.
          if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
            throw e;
          }
        }
      }
      // There is no file with that key name, but maybe it is a folder.
      // Query the underlying folder/container to list the blobs stored
      // there under that key.
      //
      Iterable<ListBlobItem> objects =
          listRootBlobs(
              key,
              true,
              EnumSet.of(BlobListingDetails.METADATA),
              null,
              getInstrumentedContext());
      // Check if the directory/container has the blob items.
      for (ListBlobItem blobItem : objects) {
        if (blobItem instanceof CloudBlockBlobWrapper
            || blobItem instanceof CloudPageBlobWrapper) {
          LOG.debug("Found blob as a directory-using this file under it to infer its properties {}",
              blobItem.getUri());
          blob = (CloudBlobWrapper) blobItem;
          // The key specifies a directory. Create a FileMetadata object which
          // specifies as such.
          BlobProperties properties = blob.getProperties();
          return new FileMetadata(key, properties.getLastModified().getTime(),
              getPermissionStatus(blob), BlobMaterialization.Implicit, hadoopBlockSize);
        }
      }
      // Return to caller with a null metadata object.
      return null;
    } catch (Exception e) {
      // Re-throw the exception as an Azure storage exception.
      throw new AzureException(e);
    }
  }
@Override
public byte[] retrieveAttribute(String key, String attribute) throws IOException {
try {
checkContainer(ContainerAccessType.PureRead);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
String value = getMetadataAttribute(blob.getMetadata(),
ensureValidAttributeName(attribute));
value = decodeMetadataAttribute(value);
return value == null ? null : value.getBytes(METADATA_ENCODING);
} catch (Exception e) {
throw new AzureException(e);
}
}
@Override
public void storeAttribute(String key, String attribute, byte[] value) throws IOException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
String encodedValue = encodeMetadataAttribute(new String(value, METADATA_ENCODING));
storeMetadataAttribute(blob, ensureValidAttributeName(attribute), encodedValue);
blob.uploadMetadata(getInstrumentedContext());
} catch (Exception e) {
throw new AzureException(e);
}
}
@Override
public InputStream retrieve(String key) throws AzureException, IOException {
return retrieve(key, 0);
}
@Override
public InputStream retrieve(String key, long startByteOffset)
throws AzureException, IOException {
return retrieve(key, startByteOffset, Optional.empty());
}
@Override
public InputStream retrieve(String key, long startByteOffset,
Optional<Configuration> options) throws AzureException, IOException {
try {
// Check if a session exists, if not create a session with the
// Azure storage server.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
checkContainer(ContainerAccessType.PureRead);
InputStream inputStream = openInputStream(getBlobReference(key), options);
if (startByteOffset > 0) {
// Skip bytes and ignore return value. This is okay
// because if you try to skip too far you will be positioned
// at the end and reads will not return data.
inputStream.skip(startByteOffset);
}
return inputStream;
} catch (IOException e) {
throw e;
} catch (Exception e) {
// Re-throw as an Azure storage exception.
throw new AzureException(e);
}
}
@Override
public FileMetadata[] list(String prefix, final int maxListingCount,
final int maxListingDepth) throws IOException {
return listInternal(prefix, maxListingCount, maxListingDepth);
}
  /**
   * Lists blobs under {@code prefix}, de-duplicating the file/directory
   * entries the SDK may return for the same key.
   *
   * @param prefix key prefix to list under (delimiter appended if missing).
   * @param maxListingCount maximum number of entries; non-positive = no cap.
   * @param maxListingDepth maximum recursion depth; negative = unbounded.
   */
  private FileMetadata[] listInternal(String prefix, final int maxListingCount,
      final int maxListingDepth)
      throws IOException {
    try {
      checkContainer(ContainerAccessType.PureRead);
      if (0 < prefix.length() && !prefix.endsWith(PATH_DELIMITER)) {
        prefix += PATH_DELIMITER;
      }
      // Enable flat listing option only if depth is unbounded and config
      // KEY_ENABLE_FLAT_LISTING is enabled.
      boolean enableFlatListing = false;
      if (maxListingDepth < 0 && sessionConfiguration.getBoolean(
          KEY_ENABLE_FLAT_LISTING, DEFAULT_ENABLE_FLAT_LISTING)) {
        enableFlatListing = true;
      }
      Iterable<ListBlobItem> objects;
      if (prefix.equals("/")) {
        objects = listRootBlobs(true, enableFlatListing);
      } else {
        objects = listRootBlobs(prefix, true, enableFlatListing);
      }
      HashMap<String, FileMetadata> fileMetadata = new HashMap<>(256);
      for (ListBlobItem blobItem : objects) {
        // Check that the maximum listing count is not exhausted.
        //
        if (0 < maxListingCount
            && fileMetadata.size() >= maxListingCount) {
          break;
        }
        if (blobItem instanceof CloudBlockBlobWrapper || blobItem instanceof CloudPageBlobWrapper) {
          String blobKey = null;
          CloudBlobWrapper blob = (CloudBlobWrapper) blobItem;
          BlobProperties properties = blob.getProperties();
          // Determine format of the blob name depending on whether an absolute
          // path is being used or not.
          blobKey = normalizeKey(blob);
          FileMetadata metadata;
          if (retrieveFolderAttribute(blob)) {
            metadata = new FileMetadata(blobKey,
                properties.getLastModified().getTime(),
                getPermissionStatus(blob),
                BlobMaterialization.Explicit,
                hadoopBlockSize);
          } else {
            metadata = new FileMetadata(
                blobKey,
                getDataLength(blob, properties),
                properties.getLastModified().getTime(),
                getPermissionStatus(blob),
                hadoopBlockSize);
          }
          // Add the metadata but remove duplicates. Note that the azure
          // storage java SDK returns two types of entries: CloudBlobWrapper
          // and CloudDirectoryWrapper. In the case where WASB generated the
          // data, there will be an empty blob for each "directory", and we will
          // receive a CloudBlobWrapper. If there are also files within this
          // "directory", we will also receive a CloudDirectoryWrapper. To
          // complicate matters, the data may not be generated by WASB, in
          // which case we may not have an empty blob for each "directory".
          // So, sometimes we receive both a CloudBlobWrapper and a
          // CloudDirectoryWrapper for each directory, and sometimes we receive
          // one or the other but not both. We remove duplicates, but
          // prefer CloudBlobWrapper over CloudDirectoryWrapper.
          // Furthermore, it is very unfortunate that the list results are not
          // ordered, and it is a partial list which uses continuation. So
          // the HashMap is the best structure to remove the duplicates, despite
          // its potential large size.
          fileMetadata.put(blobKey, metadata);
        } else if (blobItem instanceof CloudBlobDirectoryWrapper) {
          CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
          // Determine format of directory name depending on whether an absolute
          // path is being used or not.
          //
          String dirKey = normalizeKey(directory);
          // Strip the last /
          if (dirKey.endsWith(PATH_DELIMITER)) {
            dirKey = dirKey.substring(0, dirKey.length() - 1);
          }
          // Reached the targeted listing depth. Return metadata for the
          // directory using default permissions.
          //
          // Note: Something smarter should be done about permissions. Maybe
          // inherit the permissions of the first non-directory blob.
          // Also, getting a proper value for last-modified is tricky.
          FileMetadata directoryMetadata = new FileMetadata(dirKey, 0,
              defaultPermissionNoBlobMetadata(), BlobMaterialization.Implicit,
              hadoopBlockSize);
          // Add the directory metadata to the list only if it's not already
          // there. See earlier note, we prefer CloudBlobWrapper over
          // CloudDirectoryWrapper because it may have additional metadata (
          // properties and ACLs).
          if (!fileMetadata.containsKey(dirKey)) {
            fileMetadata.put(dirKey, directoryMetadata);
          }
          if (!enableFlatListing) {
            // Currently at a depth of one, decrement the listing depth for
            // sub-directories.
            buildUpList(directory, fileMetadata, maxListingCount,
                maxListingDepth - 1);
          }
        }
      }
      return fileMetadata.values().toArray(new FileMetadata[fileMetadata.size()]);
    } catch (Exception e) {
      // Re-throw as an Azure storage exception.
      //
      throw new AzureException(e);
    }
  }
/**
* Build up a metadata list of blobs in an Azure blob directory. This method
* uses a in-order first traversal of blob directory structures to maintain
* the sorted order of the blob names.
*
* @param aCloudBlobDirectory Azure blob directory
* @param metadataHashMap a map of file metadata objects for each
* non-directory blob.
* @param maxListingCount maximum length of the built up list.
*/
  private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
      HashMap<String, FileMetadata> metadataHashMap, final int maxListingCount,
      final int maxListingDepth) throws Exception {
    // Push the blob directory onto the stack.
    //
    AzureLinkedStack<Iterator<ListBlobItem>> dirIteratorStack =
        new AzureLinkedStack<Iterator<ListBlobItem>>();
    Iterable<ListBlobItem> blobItems = aCloudBlobDirectory.listBlobs(null,
        false, EnumSet.of(BlobListingDetails.METADATA), null,
        getInstrumentedContext());
    Iterator<ListBlobItem> blobItemIterator = blobItems.iterator();
    if (0 == maxListingDepth || 0 == maxListingCount) {
      // Recurrence depth and listing count are already exhausted. Return
      // immediately.
      return;
    }
    // The directory listing depth is unbounded if the maximum listing depth
    // is negative.
    final boolean isUnboundedDepth = (maxListingDepth < 0);
    // Reset the current directory listing depth.
    int listingDepth = 1;
    // Loop until all directories have been traversed in-order. Loop only
    // the following conditions are satisfied:
    // (1) The stack is not empty, and
    // (2) maxListingCount > 0 implies that the number of items in the
    // metadata list is less than the max listing count.
    while (null != blobItemIterator
        && (maxListingCount <= 0 || metadataHashMap.size() < maxListingCount)) {
      while (blobItemIterator.hasNext()) {
        // Check if the count of items on the list exhausts the maximum
        // listing count.
        //
        if (0 < maxListingCount && metadataHashMap.size() >= maxListingCount) {
          break;
        }
        ListBlobItem blobItem = blobItemIterator.next();
        // Add the file metadata to the list if this is not a blob
        // directory item.
        //
        if (blobItem instanceof CloudBlockBlobWrapper || blobItem instanceof CloudPageBlobWrapper) {
          String blobKey = null;
          CloudBlobWrapper blob = (CloudBlobWrapper) blobItem;
          BlobProperties properties = blob.getProperties();
          // Determine format of the blob name depending on whether an absolute
          // path is being used or not.
          blobKey = normalizeKey(blob);
          FileMetadata metadata;
          if (retrieveFolderAttribute(blob)) {
            metadata = new FileMetadata(blobKey,
                properties.getLastModified().getTime(),
                getPermissionStatus(blob),
                BlobMaterialization.Explicit,
                hadoopBlockSize);
          } else {
            metadata = new FileMetadata(
                blobKey,
                getDataLength(blob, properties),
                properties.getLastModified().getTime(),
                getPermissionStatus(blob),
                hadoopBlockSize);
          }
          // Add the metadata but remove duplicates. Note that the azure
          // storage java SDK returns two types of entries: CloudBlobWrapper
          // and CloudDirectoryWrapper. In the case where WASB generated the
          // data, there will be an empty blob for each "directory", and we will
          // receive a CloudBlobWrapper. If there are also files within this
          // "directory", we will also receive a CloudDirectoryWrapper. To
          // complicate matters, the data may not be generated by WASB, in
          // which case we may not have an empty blob for each "directory".
          // So, sometimes we receive both a CloudBlobWrapper and a
          // CloudDirectoryWrapper for each directory, and sometimes we receive
          // one or the other but not both. We remove duplicates, but
          // prefer CloudBlobWrapper over CloudDirectoryWrapper.
          // Furthermore, it is very unfortunate that the list results are not
          // ordered, and it is a partial list which uses continuation. So
          // the HashMap is the best structure to remove the duplicates, despite
          // its potential large size.
          metadataHashMap.put(blobKey, metadata);
        } else if (blobItem instanceof CloudBlobDirectoryWrapper) {
          CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
          // This is a directory blob, push the current iterator onto
          // the stack of iterators and start iterating through the current
          // directory.
          if (isUnboundedDepth || maxListingDepth > listingDepth) {
            // Push the current directory on the stack and increment the listing
            // depth.
            dirIteratorStack.push(blobItemIterator);
            ++listingDepth;
            // The current blob item represents the new directory. Get
            // an iterator for this directory and continue by iterating through
            // this directory.
            // NOTE(review): this nested listing requests no listing details
            // (noneOf), unlike the top-level METADATA listing above — confirm
            // retrieveFolderAttribute still behaves as intended for nested
            // entries.
            blobItems = directory.listBlobs(null, false,
                EnumSet.noneOf(BlobListingDetails.class), null,
                getInstrumentedContext());
            blobItemIterator = blobItems.iterator();
          } else {
            // Determine format of directory name depending on whether an
            // absolute path is being used or not.
            String dirKey = normalizeKey(directory);
            // Add the directory metadata to the list only if it's not already
            // there. See earlier note, we prefer CloudBlobWrapper over
            // CloudDirectoryWrapper because it may have additional metadata (
            // properties and ACLs).
            if (!metadataHashMap.containsKey(dirKey)) {
              // Reached the targeted listing depth. Return metadata for the
              // directory using default permissions.
              //
              // Note: Something smarter should be done about permissions. Maybe
              // inherit the permissions of the first non-directory blob.
              // Also, getting a proper value for last-modified is tricky.
              //
              FileMetadata directoryMetadata = new FileMetadata(dirKey,
                  0,
                  defaultPermissionNoBlobMetadata(),
                  BlobMaterialization.Implicit,
                  hadoopBlockSize);
              // Add the directory metadata to the list.
              metadataHashMap.put(dirKey, directoryMetadata);
            }
          }
        }
      }
      // Traversal of directory tree
      // Check if the iterator stack is empty. If it is set the next blob
      // iterator to null. This will act as a terminator for the for-loop.
      // Otherwise pop the next iterator from the stack and continue looping.
      //
      if (dirIteratorStack.isEmpty()) {
        blobItemIterator = null;
      } else {
        // Pop the next directory item from the stack and decrement the
        // depth.
        blobItemIterator = dirIteratorStack.pop();
        --listingDepth;
        // Assertion: Listing depth should not be less than zero.
        if (listingDepth < 0) {
          throw new AssertionError("Non-negative listing depth expected");
        }
      }
    }
  }
/**
* Return the actual data length of the blob with the specified properties.
* If it is a page blob, you can't rely on the length from the properties
* argument and you must get it from the file. Otherwise, you can.
*/
private long getDataLength(CloudBlobWrapper blob, BlobProperties properties)
throws AzureException {
if (blob instanceof CloudPageBlobWrapper) {
try {
return PageBlobInputStream.getPageBlobDataSize((CloudPageBlobWrapper) blob,
getInstrumentedContext(
isConcurrentOOBAppendAllowed()));
} catch (Exception e) {
throw new AzureException(
"Unexpected exception getting page blob actual data size.", e);
}
}
return properties.getLength();
}
/**
* Deletes the given blob, taking special care that if we get a
* blob-not-found exception upon retrying the operation, we just
* swallow the error since what most probably happened is that
* the first operation succeeded on the server.
* @param blob The blob to delete.
* @param lease Azure blob lease, or null if no lease is to be used.
* @throws StorageException
*/
  private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {
    OperationContext operationContext = getInstrumentedContext();
    try {
      blob.delete(operationContext, lease);
    } catch (StorageException e) {
      if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
        LOG.error("Encountered Storage Exception for delete on Blob: {}"
            + ", Exception Details: {} Error Code: {}",
            blob.getUri(), e.getMessage(), e.getErrorCode());
      }
      // On exception, check that if:
      // 1. It's a BlobNotFound exception AND
      // 2. It got there after one-or-more retries THEN
      // we swallow the exception.
      // (The first request result holding an exception proves an earlier
      // attempt failed, so the delete most likely succeeded server-side
      // before the retry observed BlobNotFound.)
      if (e.getErrorCode() != null
          && "BlobNotFound".equals(e.getErrorCode())
          && operationContext.getRequestResults().size() > 1
          && operationContext.getRequestResults().get(0).getException() != null) {
        LOG.debug("Swallowing delete exception on retry: {}", e.getMessage());
        return;
      } else {
        throw e;
      }
    } finally {
      // Always release the lease, whether the delete succeeded or not.
      if (lease != null) {
        lease.free();
      }
    }
  }
/**
* API implementation to delete a blob in the back end azure storage.
*/
@Override
public boolean delete(String key, SelfRenewingLease lease) throws IOException {
try {
if (checkContainer(ContainerAccessType.ReadThenWrite) == ContainerState.DoesntExist) {
// Container doesn't exist, no need to do anything
return true;
}
// Get the blob reference and delete it.
CloudBlobWrapper blob = getBlobReference(key);
safeDelete(blob, lease);
return true;
} catch (Exception e) {
if (e instanceof StorageException
&& NativeAzureFileSystemHelper.isFileNotFoundException(
(StorageException) e)) {
// the file or directory does not exist
return false;
}
throw new AzureException(e);
}
}
/**
* API implementation to delete a blob in the back end azure storage.
*/
  @Override
  public boolean delete(String key) throws IOException {
    try {
      return delete(key, null);
    } catch (IOException e) {
      // If the blob is lease-protected, acquire the lease ourselves and
      // retry the delete under it.
      Throwable t = e.getCause();
      if (t instanceof StorageException) {
        StorageException se = (StorageException) t;
        if ("LeaseIdMissing".equals(se.getErrorCode())){
          SelfRenewingLease lease = null;
          try {
            lease = acquireLease(key);
            return delete(key, lease);
          } catch (AzureException e3) {
            LOG.warn("Got unexpected exception trying to acquire lease on "
                + key + "." + e3.getMessage());
            throw e3;
          } finally {
            // NOTE(review): safeDelete (via delete(key, lease)) also frees
            // the lease in its finally block, so free() may run twice here —
            // confirm SelfRenewingLease.free() tolerates repeated calls.
            try {
              if (lease != null){
                lease.free();
              }
            } catch (Exception e4){
              LOG.error("Unable to free lease on " + key, e4);
            }
          }
        } else {
          throw e;
        }
      } else {
        throw e;
      }
    }
  }
@Override
public void rename(String srcKey, String dstKey) throws IOException {
rename(srcKey, dstKey, false, null, true);
}
@Override
public void rename(String srcKey, String dstKey, boolean acquireLease,
SelfRenewingLease existingLease) throws IOException {
rename(srcKey, dstKey, acquireLease, existingLease, true);
}
  /**
   * Renames {@code srcKey} to {@code dstKey} by server-side copy followed by
   * delete of the source, optionally holding a lease on the source for the
   * duration. Falls back to a client-side byte copy for page blobs when the
   * service keeps answering "server busy".
   *
   * @param srcKey source blob key (must exist).
   * @param dstKey destination blob key.
   * @param acquireLease acquire a new lease on the source before copying.
   * @param existingLease lease already held on the source, or null.
   * @param overwriteDestination whether an existing destination may be replaced.
   */
  @Override
  public void rename(String srcKey, String dstKey, boolean acquireLease,
      SelfRenewingLease existingLease, boolean overwriteDestination) throws IOException {
    LOG.debug("Moving {} to {}", srcKey, dstKey);
    if (acquireLease && existingLease != null) {
      throw new IOException("Cannot acquire new lease if one already exists.");
    }
    CloudBlobWrapper srcBlob = null;
    CloudBlobWrapper dstBlob = null;
    SelfRenewingLease lease = null;
    try {
      // Attempts rename may occur before opening any streams so first,
      // check if a session exists, if not create a session with the Azure
      // storage server.
      if (null == storageInteractionLayer) {
        final String errMsg = String.format(
            "Storage session expected for URI '%s' but does not exist.",
            sessionUri);
        throw new AssertionError(errMsg);
      }
      checkContainer(ContainerAccessType.ReadThenWrite);
      // Get the source blob and assert its existence. If the source key
      // needs to be normalized then normalize it.
      //
      srcBlob = getBlobReference(srcKey);
      if (!srcBlob.exists(getInstrumentedContext())) {
        throw new AzureException("Source blob " + srcKey + " does not exist.");
      }
      /**
       * Conditionally get a lease on the source blob to prevent other writers
       * from changing it. This is used for correctness in HBase when log files
       * are renamed. It generally should do no harm other than take a little
       * more time for other rename scenarios. When the HBase master renames a
       * log file folder, the lease locks out other writers. This
       * prevents a region server that the master thinks is dead, but is still
       * alive, from committing additional updates. This is different than
       * when HBase runs on HDFS, where the region server recovers the lease
       * on a log file, to gain exclusive access to it, before it splits it.
       */
      if (acquireLease) {
        lease = srcBlob.acquireLease();
      } else if (existingLease != null) {
        lease = existingLease;
      }
      // Get the destination blob. The destination key always needs to be
      // normalized.
      //
      dstBlob = getBlobReference(dstKey);
      // Rename the source blob to the destination blob by copying it to
      // the destination blob then deleting it.
      //
      // Copy blob operation in Azure storage is very costly. It will be highly
      // likely throttled during Azure storage gc. Short term fix will be using
      // a more intensive exponential retry policy when the cluster is getting
      // throttled.
      try {
        dstBlob.startCopyFromBlob(srcBlob, null,
            getInstrumentedContext(), overwriteDestination);
      } catch (StorageException se) {
        if (se.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
          // Server busy: retry the copy once more under a configurable
          // exponential-backoff retry policy.
          int copyBlobMinBackoff = sessionConfiguration.getInt(
              KEY_COPYBLOB_MIN_BACKOFF_INTERVAL,
              DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL);
          int copyBlobMaxBackoff = sessionConfiguration.getInt(
              KEY_COPYBLOB_MAX_BACKOFF_INTERVAL,
              DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL);
          int copyBlobDeltaBackoff = sessionConfiguration.getInt(
              KEY_COPYBLOB_BACKOFF_INTERVAL,
              DEFAULT_COPYBLOB_BACKOFF_INTERVAL);
          int copyBlobMaxRetries = sessionConfiguration.getInt(
              KEY_COPYBLOB_MAX_IO_RETRIES,
              DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);
          BlobRequestOptions options = new BlobRequestOptions();
          options.setRetryPolicyFactory(new RetryExponentialRetry(
              copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
              copyBlobMaxRetries));
          dstBlob.startCopyFromBlob(srcBlob, options,
              getInstrumentedContext(), overwriteDestination);
        } else {
          throw se;
        }
      }
      waitForCopyToComplete(dstBlob, getInstrumentedContext());
      safeDelete(srcBlob, lease);
    } catch (StorageException e) {
      if (e.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
        // Service still busy after retries: for page blobs, fall back to a
        // client-side copy (read source, write destination), then delete.
        LOG.warn("Rename: CopyBlob: StorageException: ServerBusy: Retry complete, will attempt client side copy for page blob");
        InputStream ipStream = null;
        OutputStream opStream = null;
        try {
          if (srcBlob.getProperties().getBlobType() == BlobType.PAGE_BLOB){
            ipStream = openInputStream(srcBlob, Optional.empty());
            opStream = openOutputStream(dstBlob);
            byte[] buffer = new byte[PageBlobFormatHelpers.PAGE_SIZE];
            int len;
            while ((len = ipStream.read(buffer)) != -1) {
              opStream.write(buffer, 0, len);
            }
            opStream.flush();
            opStream.close();
            ipStream.close();
          } else {
            throw new AzureException(e);
          }
          safeDelete(srcBlob, lease);
        } catch(StorageException se) {
          LOG.warn("Rename: CopyBlob: StorageException: Failed");
          throw new AzureException(se);
        } finally {
          IOUtils.closeStream(ipStream);
          IOUtils.closeStream(opStream);
        }
      } else {
        throw new AzureException(e);
      }
    } catch (URISyntaxException e) {
      // Re-throw exception as an Azure storage exception.
      throw new AzureException(e);
    }
  }
private void waitForCopyToComplete(CloudBlobWrapper blob, OperationContext opContext){
boolean copyInProgress = true;
while (copyInProgress) {
try {
blob.downloadAttributes(opContext);
}
catch (StorageException se){
}
// test for null because mocked filesystem doesn't know about copystates yet.
copyInProgress = (blob.getCopyState() != null && blob.getCopyState().getStatus() == CopyStatus.PENDING);
if (copyInProgress) {
try {
Thread.sleep(1000);
}
catch (InterruptedException ie){
//ignore
}
}
}
}
/**
* Checks whether an explicit file/folder exists.
* This is used by redo of atomic rename.
* There was a bug(apache jira HADOOP-12780) during atomic rename if
* process crashes after an inner directory has been renamed but still
* there are file under that directory to be renamed then after the
* process comes again it tries to redo the renames. It checks whether
* the directory exists or not by calling filesystem.exist.
* But filesystem.Exists will treat that directory as implicit directory
   * and return true as a file exists under that directory. So it will
   * try to rename that directory and will fail as the corresponding blob
* does not exist. So this method explicitly checks for the blob.
*/
@Override
public boolean explicitFileExists(String key) throws AzureException {
CloudBlobWrapper blob;
try {
blob = getBlobReference(key);
if (null != blob && blob.exists(getInstrumentedContext())) {
return true;
}
return false;
} catch (StorageException e) {
throw new AzureException(e);
} catch (URISyntaxException e) {
throw new AzureException(e);
}
}
/**
* Changes the permission status on the given key.
*/
@Override
public void changePermissionStatus(String key, PermissionStatus newPermission)
throws AzureException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
storePermissionStatus(blob, newPermission);
blob.uploadMetadata(getInstrumentedContext());
} catch (Exception e) {
throw new AzureException(e);
}
}
@Override
public void purge(String prefix) throws IOException {
try {
// Attempts to purge may occur before opening any streams so first,
// check if a session exists, if not create a session with the Azure
// storage server.
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
if (checkContainer(ContainerAccessType.ReadThenWrite) == ContainerState.DoesntExist) {
// Container doesn't exist, no need to do anything.
return;
}
// Get all blob items with the given prefix from the container and delete
// them.
Iterable<ListBlobItem> objects = listRootBlobs(prefix, false, false);
for (ListBlobItem blobItem : objects) {
((CloudBlob) blobItem).delete(DeleteSnapshotsOption.NONE, null, null,
getInstrumentedContext());
}
} catch (Exception e) {
// Re-throw as an Azure storage exception.
//
throw new AzureException(e);
}
}
/**
* Get a lease on the blob identified by key. This lease will be renewed
* indefinitely by a background thread.
*/
@Override
public SelfRenewingLease acquireLease(String key) throws AzureException {
LOG.debug("acquiring lease on {}", key);
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
return blob.acquireLease();
}
catch (Exception e) {
// Caught exception while attempting to get lease. Re-throw as an
// Azure storage exception.
throw new AzureException(e);
}
}
@Override
public void updateFolderLastModifiedTime(String key, Date lastModified,
SelfRenewingLease folderLease)
throws AzureException {
try {
checkContainer(ContainerAccessType.ReadThenWrite);
CloudBlobWrapper blob = getBlobReference(key);
//setLastModified function is not available in 2.0.0 version. blob.uploadProperties automatically updates last modified
//timestamp to current time
blob.uploadProperties(getInstrumentedContext(), folderLease);
} catch (Exception e) {
// Caught exception while attempting to update the properties. Re-throw as an
// Azure storage exception.
throw new AzureException(e);
}
}
@Override
public void updateFolderLastModifiedTime(String key,
SelfRenewingLease folderLease) throws AzureException {
final Calendar lastModifiedCalendar = Calendar
.getInstance(Utility.LOCALE_US);
lastModifiedCalendar.setTimeZone(Utility.UTC_ZONE);
Date lastModified = lastModifiedCalendar.getTime();
updateFolderLastModifiedTime(key, lastModified, folderLease);
}
@Override
public void dump() throws IOException {
}
@Override
public void close() {
if (bandwidthGaugeUpdater != null) {
bandwidthGaugeUpdater.close();
bandwidthGaugeUpdater = null;
}
}
// Finalizer to ensure complete shutdown
@Override
protected void finalize() throws Throwable {
LOG.debug("finalize() called");
close();
super.finalize();
}
@Override
public DataOutputStream retrieveAppendStream(String key, int bufferSize) throws IOException {
try {
if (isPageBlobKey(key)) {
throw new UnsupportedOperationException("Append not supported for Page Blobs");
}
CloudBlobWrapper blob = this.container.getBlockBlobReference(key);
OutputStream outputStream;
BlockBlobAppendStream blockBlobOutputStream = new BlockBlobAppendStream(
(CloudBlockBlobWrapper) blob,
key,
bufferSize,
isBlockBlobWithCompactionKey(key),
getInstrumentedContext());
outputStream = blockBlobOutputStream;
DataOutputStream dataOutStream = new SyncableDataOutputStream(
outputStream);
return dataOutStream;
} catch(Exception ex) {
throw new AzureException(ex);
}
}
}
| ContainerAccessType |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/errors/StreamsStoppedException.java | {
"start": 1248,
"end": 1621
} | class ____ extends InvalidStateStoreException {
private static final long serialVersionUID = 1L;
public StreamsStoppedException(final String message) {
super(message);
}
@SuppressWarnings("unused")
public StreamsStoppedException(final String message, final Throwable throwable) {
super(message, throwable);
}
}
| StreamsStoppedException |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_dubbo.java | {
"start": 382,
"end": 1195
} | class ____ extends TestCase {
protected void setUp() throws Exception {
ParserConfig.global.addAccept("com.alibaba.json.test.dubbo.Tigers");
}
public void test_0 () throws Exception {
HelloServiceImpl helloService = new HelloServiceImpl();
Tiger tiger = new Tiger();
tiger.setTigerName("东北虎");
tiger.setTigerSex(true);
//Tiger tigers = helloService.eatTiger(tiger).getTiger();
Tigers tigers = helloService.eatTiger(tiger);
Assert.assertNotNull(tigers.getTiger());
String text = JSON.toJSONString(tigers, SerializerFeature.WriteClassName);
System.out.println(text);
Tigers tigers1 = (Tigers) JSON.parse(text);
Assert.assertNotNull(tigers1.getTiger());
}
}
| Bug_for_dubbo |
java | google__guice | core/src/com/google/inject/internal/InjectorShell.java | {
"start": 12309,
"end": 14484
} | class ____ extends InternalFactory<Logger> implements Provider<Logger> {
@Override
public Logger get(InternalContext context, Dependency<?> dependency, boolean linked) {
return makeLogger(dependency);
}
@Override
public Logger get() {
return Logger.getAnonymousLogger();
}
@Override
public Provider<Logger> makeProvider(InjectorImpl injector, Dependency<?> dependency) {
return InternalFactory.makeProviderFor(makeLogger(dependency), this);
}
@Override
MethodHandleResult makeHandle(LinkageContext context, boolean linked) {
return makeCachable(MAKE_LOGGER_MH);
}
private static final MethodHandle MAKE_LOGGER_MH;
static {
try {
MAKE_LOGGER_MH =
castReturnToObject(
MethodHandles.dropArguments(
MethodHandles.lookup()
.findStatic(
LoggerFactory.class,
"makeLogger",
methodType(Logger.class, Dependency.class)),
0,
InternalContext.class));
} catch (ReflectiveOperationException e) {
throw new LinkageError("Failed to find makeLogger function", e);
}
}
private static Logger makeLogger(Dependency<?> dependency) {
InjectionPoint injectionPoint = dependency.getInjectionPoint();
return injectionPoint == null
? Logger.getAnonymousLogger()
: Logger.getLogger(injectionPoint.getMember().getDeclaringClass().getName());
}
@Override
public String toString() {
return "Provider<Logger>";
}
}
private static void bindStage(InjectorImpl injector, Stage stage) {
Key<Stage> key = Key.get(Stage.class);
InstanceBindingImpl<Stage> stageBinding =
new InstanceBindingImpl<>(
injector,
key,
SourceProvider.UNKNOWN_SOURCE,
ConstantFactory.create(stage, SourceProvider.UNKNOWN_SOURCE),
ImmutableSet.<InjectionPoint>of(),
stage);
injector.getBindingData().putBinding(key, stageBinding);
}
private static | LoggerFactory |
java | apache__dubbo | dubbo-registry/dubbo-registry-multicast/src/test/java/org/apache/dubbo/registry/multicast/MulticastRegistryFactoryTest.java | {
"start": 1158,
"end": 1469
} | class ____ {
@Test
void shouldCreateRegistry() {
Registry registry = new MulticastRegistryFactory().createRegistry(URL.valueOf("multicast://239.255.255.255/"));
assertThat(registry, not(nullValue()));
assertThat(registry.isAvailable(), is(true));
}
}
| MulticastRegistryFactoryTest |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java | {
"start": 1380,
"end": 2474
} | class ____ extends LeafShapeFieldData.ShapeScriptValues<GeoPoint, GeoShapeValues.GeoShapeValue>
implements
ScriptDocValues.Geometry {
public GeoShapeScriptValues(GeometrySupplier<GeoPoint, GeoShapeValues.GeoShapeValue> supplier) {
super(supplier);
}
@Override
public GeoShapeValues.GeoShapeValue get(int index) {
return super.get(index);
}
@Override
public GeoShapeValues.GeoShapeValue getValue() {
return super.getValue();
}
@Override
public GeoBoundingBox getBoundingBox() {
return (GeoBoundingBox) super.getBoundingBox();
}
@Override
public double getMercatorWidth() {
return lonToSphericalMercator(getBoundingBox().right()) - lonToSphericalMercator(getBoundingBox().left());
}
@Override
public double getMercatorHeight() {
return latToSphericalMercator(getBoundingBox().top()) - latToSphericalMercator(getBoundingBox().bottom());
}
}
}
| GeoShapeScriptValues |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/issues/BeanVsProcessorPerformanceTest.java | {
"start": 1136,
"end": 2424
} | class ____ extends ContextTestSupport {
private final int size = 100000;
@Override
protected Registry createCamelRegistry() throws Exception {
Registry jndi = super.createCamelRegistry();
jndi.bind("myLittleBean", new MyLittleBean());
return jndi;
}
@Test
public void testProcessor() {
StopWatch watch = new StopWatch();
for (int i = 0; i < size; i++) {
Object out = template.requestBody("direct:a", Integer.toString(i));
assertEquals("Bye " + i, out);
}
log.info("Processor took {} ms ", watch.taken());
}
@Test
public void testBean() {
StopWatch watch = new StopWatch();
for (int i = 0; i < size; i++) {
Object out = template.requestBody("direct:b", Integer.toString(i));
assertEquals("Bye " + i, out);
}
log.info("Bean took {} ms ", watch.taken());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:a").process(new MyLittleProcessor());
from("direct:b").bean("myLittleBean", "bye");
}
};
}
}
| BeanVsProcessorPerformanceTest |
java | elastic__elasticsearch | x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadataDiffableSerializationTests.java | {
"start": 1192,
"end": 3337
} | class ____ extends ChunkedToXContentDiffableSerializationTestCase<Metadata.ClusterCustom> {
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return AutoscalingTestCase.getAutoscalingNamedWriteableRegistry();
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return AutoscalingTestCase.getAutoscalingXContentRegistry();
}
@Override
protected AutoscalingMetadata doParseInstance(final XContentParser parser) {
return AutoscalingMetadata.parse(parser);
}
@Override
protected Writeable.Reader<Metadata.ClusterCustom> instanceReader() {
return AutoscalingMetadata::new;
}
@Override
protected AutoscalingMetadata createTestInstance() {
return randomAutoscalingMetadata();
}
@Override
protected Metadata.ClusterCustom makeTestChanges(final Metadata.ClusterCustom testInstance) {
return mutateInstance(testInstance);
}
@Override
protected Metadata.ClusterCustom mutateInstance(final Metadata.ClusterCustom instance) {
final AutoscalingMetadata metadata = (AutoscalingMetadata) instance;
final SortedMap<String, AutoscalingPolicyMetadata> policies = new TreeMap<>(metadata.policies());
if (policies.size() == 0 || randomBoolean()) {
final AutoscalingPolicy policy = randomAutoscalingPolicy();
policies.put(policy.name(), new AutoscalingPolicyMetadata(policy));
} else {
// randomly remove a policy
final String name = randomFrom(policies.keySet());
final AutoscalingPolicyMetadata policyMetadata = policies.remove(name);
final AutoscalingPolicy mutatedPolicy = mutateAutoscalingPolicy(policyMetadata.policy());
policies.put(mutatedPolicy.name(), new AutoscalingPolicyMetadata(mutatedPolicy));
}
return new AutoscalingMetadata(policies);
}
@Override
protected Writeable.Reader<Diff<Metadata.ClusterCustom>> diffReader() {
return AutoscalingMetadata.AutoscalingMetadataDiff::new;
}
}
| AutoscalingMetadataDiffableSerializationTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/SqmRenderContext.java | {
"start": 473,
"end": 823
} | interface ____ {
/**
* Returns an alias for the given from node.
*
* @param from The from element
* @return The resolved alias
*/
String resolveAlias(SqmFrom<?, ?> from);
String resolveParameterName(JpaCriteriaParameter<?> parameter);
static SqmRenderContext simpleContext() {
return new SimpleSqmRenderContext();
}
}
| SqmRenderContext |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/OnResourceCondition.java | {
"start": 1430,
"end": 3076
} | class ____ extends SpringBootCondition {
@Override
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
MultiValueMap<String, @Nullable Object> attributes = metadata
.getAllAnnotationAttributes(ConditionalOnResource.class.getName(), true);
Assert.state(attributes != null, "'attributes' must not be null");
ResourceLoader loader = context.getResourceLoader();
List<String> locations = new ArrayList<>();
List<@Nullable Object> resources = attributes.get("resources");
Assert.state(resources != null, "'resources' must not be null");
collectValues(locations, resources);
Assert.state(!locations.isEmpty(),
"@ConditionalOnResource annotations must specify at least one resource location");
List<String> missing = new ArrayList<>();
for (String location : locations) {
String resource = context.getEnvironment().resolvePlaceholders(location);
if (!loader.getResource(resource).exists()) {
missing.add(location);
}
}
if (!missing.isEmpty()) {
return ConditionOutcome.noMatch(ConditionMessage.forCondition(ConditionalOnResource.class)
.didNotFind("resource", "resources")
.items(Style.QUOTE, missing));
}
return ConditionOutcome.match(ConditionMessage.forCondition(ConditionalOnResource.class)
.found("location", "locations")
.items(locations));
}
private void collectValues(List<String> names, List<@Nullable Object> resources) {
for (Object resource : resources) {
Object[] items = (Object[]) resource;
if (items != null) {
for (Object item : items) {
names.add((String) item);
}
}
}
}
}
| OnResourceCondition |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/util/config/memory/ProcessMemoryUtilsTestBase.java | {
"start": 13155,
"end": 14708
} | class ____ implements ProcessMemorySpec {
private static final long serialVersionUID = 2863985135320165745L;
private final MemorySize heap;
private final MemorySize directMemory;
private final MemorySize metaspace;
private JvmArgTestingProcessMemorySpec(
MemorySize heap, MemorySize directMemory, MemorySize metaspace) {
this.heap = heap;
this.directMemory = directMemory;
this.metaspace = metaspace;
}
@Override
public MemorySize getJvmHeapMemorySize() {
return heap;
}
@Override
public MemorySize getJvmDirectMemorySize() {
return directMemory;
}
@Override
public MemorySize getJvmMetaspaceSize() {
return metaspace;
}
@Override
public MemorySize getJvmOverheadSize() {
throw new UnsupportedOperationException();
}
@Override
public MemorySize getTotalFlinkMemorySize() {
throw new UnsupportedOperationException();
}
@Override
public MemorySize getTotalProcessMemorySize() {
throw new UnsupportedOperationException();
}
public static JvmArgTestingProcessMemorySpec generate() {
return new JvmArgTestingProcessMemorySpec(
MemorySize.ofMebiBytes(1),
MemorySize.ofMebiBytes(2),
MemorySize.ofMebiBytes(3));
}
}
}
| JvmArgTestingProcessMemorySpec |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/inference/TextEmbeddingTests.java | {
"start": 1124,
"end": 2373
} | class ____ extends AbstractFunctionTestCase {
public TextEmbeddingTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
return parameterSuppliersFromTypedData(
List.of(
new TestCaseSupplier(
List.of(KEYWORD, KEYWORD),
() -> new TestCaseSupplier.TestCase(
List.of(
new TestCaseSupplier.TypedData(randomBytesReference(10).toBytesRef(), KEYWORD, "text"),
new TestCaseSupplier.TypedData(randomBytesReference(10).toBytesRef(), KEYWORD, "inference_id")
),
Matchers.blankOrNullString(),
DENSE_VECTOR,
equalTo(true)
)
)
)
);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new TextEmbedding(source, args.get(0), args.get(1));
}
@Override
protected boolean canSerialize() {
return false;
}
}
| TextEmbeddingTests |
java | square__retrofit | retrofit-adapters/rxjava2/src/main/java/retrofit2/adapter/rxjava2/CallEnqueueObservable.java | {
"start": 964,
"end": 1558
} | class ____<T> extends Observable<Response<T>> {
private final Call<T> originalCall;
CallEnqueueObservable(Call<T> originalCall) {
this.originalCall = originalCall;
}
@Override
protected void subscribeActual(Observer<? super Response<T>> observer) {
// Since Call is a one-shot type, clone it for each new observer.
Call<T> call = originalCall.clone();
CallCallback<T> callback = new CallCallback<>(call, observer);
observer.onSubscribe(callback);
if (!callback.isDisposed()) {
call.enqueue(callback);
}
}
private static final | CallEnqueueObservable |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/SameThreadTimeoutInvocationTests.java | {
"start": 963,
"end": 1896
} | class ____ {
@Test
void resetsInterruptFlag() {
var exception = assertThrows(TimeoutException.class, () -> withExecutor(executor -> {
var delegate = new EventuallyInterruptibleInvocation();
var duration = new TimeoutDuration(1, NANOSECONDS);
var timeoutInvocation = new SameThreadTimeoutInvocation<>(delegate, duration, executor, () -> "execution",
PreInterruptCallbackInvocation.NOOP);
timeoutInvocation.proceed();
}));
assertFalse(Thread.currentThread().isInterrupted());
assertThat(exception).hasMessage("execution timed out after 1 nanosecond");
}
private void withExecutor(ThrowingConsumer<ScheduledExecutorService> consumer) throws Throwable {
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
try {
consumer.accept(executor);
}
finally {
executor.shutdown();
assertTrue(executor.awaitTermination(5, SECONDS));
}
}
}
| SameThreadTimeoutInvocationTests |
java | quarkusio__quarkus | extensions/redis-client/runtime/src/main/java/io/quarkus/redis/runtime/datasource/ReactiveKeyCommandsImpl.java | {
"start": 735,
"end": 6953
} | class ____<K> extends AbstractKeyCommands<K> implements ReactiveKeyCommands<K> {
private final ReactiveRedisDataSource reactive;
public ReactiveKeyCommandsImpl(ReactiveRedisDataSourceImpl redis, Type k) {
super(redis, k);
this.reactive = redis;
}
@Override
public ReactiveRedisDataSource getDataSource() {
return reactive;
}
@Override
public Uni<Boolean> copy(K source, K destination) {
return super._copy(source, destination)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> copy(K source, K destination, CopyArgs copyArgs) {
return super._copy(source, destination, copyArgs)
.map(Response::toBoolean);
}
@Override
public Uni<Integer> del(K... keys) {
return super._del(keys)
.map(Response::toInteger);
}
@Override
public Uni<String> dump(K key) {
return super._dump(key)
.map(this::decodeStringOrNull);
}
@Override
public Uni<Boolean> exists(K key) {
return super._exists(key)
.map(Response::toBoolean);
}
@Override
public Uni<Integer> exists(K... keys) {
return super._exists(keys)
.map(Response::toInteger);
}
@Override
public Uni<Boolean> expire(K key, long seconds, ExpireArgs expireArgs) {
return super._expire(key, seconds, expireArgs)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> expire(K key, Duration duration, ExpireArgs expireArgs) {
return expire(key, duration.toSeconds(), expireArgs);
}
@Override
public Uni<Boolean> expire(K key, long seconds) {
return expire(key, seconds, new ExpireArgs());
}
@Override
public Uni<Boolean> expire(K key, Duration duration) {
return expire(key, duration.toSeconds(), new ExpireArgs());
}
@Override
public Uni<Boolean> expireat(K key, long timestamp) {
return expireat(key, timestamp, new ExpireArgs());
}
@Override
public Uni<Boolean> expireat(K key, Instant timestamp) {
return expireat(key, timestamp.getEpochSecond(), new ExpireArgs());
}
@Override
public Uni<Boolean> expireat(K key, long timestamp, ExpireArgs expireArgs) {
return super._expireat(key, timestamp, expireArgs)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> expireat(K key, Instant timestamp, ExpireArgs expireArgs) {
return expireat(key, timestamp.getEpochSecond(), expireArgs);
}
@Override
public Uni<Long> expiretime(K key) {
return super._expiretime(key)
.map(r -> decodeExpireResponse(key, r));
}
@Override
public Uni<List<K>> keys(String pattern) {
return super._keys(pattern)
.map(this::decodeKeys);
}
@Override
public Uni<Boolean> move(K key, long db) {
return super._move(key, db)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> persist(K key) {
return super._persist(key)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> pexpire(K key, long milliseconds, ExpireArgs expireArgs) {
return super._pexpire(key, milliseconds, expireArgs)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> pexpire(K key, Duration duration, ExpireArgs expireArgs) {
return pexpire(key, duration.toMillis(), expireArgs);
}
@Override
public Uni<Boolean> pexpire(K key, long ms) {
return pexpire(key, ms, new ExpireArgs());
}
@Override
public Uni<Boolean> pexpire(K key, Duration duration) {
return pexpire(key, duration.toMillis(), new ExpireArgs());
}
@Override
public Uni<Boolean> pexpireat(K key, long timestamp) {
return pexpireat(key, timestamp, new ExpireArgs());
}
@Override
public Uni<Boolean> pexpireat(K key, Instant timestamp) {
return pexpireat(key, timestamp.toEpochMilli(), new ExpireArgs());
}
@Override
public Uni<Boolean> pexpireat(K key, long timestamp, ExpireArgs expireArgs) {
return super._pexpireat(key, timestamp, expireArgs)
.map(Response::toBoolean);
}
@Override
public Uni<Boolean> pexpireat(K key, Instant timestamp, ExpireArgs expireArgs) {
return pexpireat(key, timestamp.toEpochMilli(), expireArgs);
}
@Override
public Uni<Long> pexpiretime(K key) {
return super._pexpiretime(key)
.map(r -> decodeExpireResponse(key, r));
}
@Override
public Uni<Long> pttl(K key) {
return super._pttl(key)
.map(r -> decodeExpireResponse(key, r));
}
@Override
public Uni<K> randomkey() {
return super._randomkey()
.map(this::decodeK);
}
@Override
public Uni<Void> rename(K key, K newKey) {
return super._rename(key, newKey)
.replaceWithVoid();
}
@Override
public Uni<Boolean> renamenx(K key, K newKey) {
return super._renamenx(key, newKey)
.map(Response::toBoolean);
}
@Override
public ReactiveKeyScanCursor<K> scan() {
return new ScanReactiveCursorImpl<>(redis, marshaller, typeOfKey, Collections.emptyList());
}
@Override
public ReactiveKeyScanCursor<K> scan(KeyScanArgs args) {
nonNull(args, "args");
return new ScanReactiveCursorImpl<>(redis, marshaller, typeOfKey, args.toArgs());
}
@Override
public Uni<Integer> touch(K... keys) {
return super._touch(keys)
.map(Response::toInteger);
}
@Override
public Uni<Long> ttl(K key) {
return super._ttl(key)
.map(r -> decodeExpireResponse(key, r));
}
@Override
public Uni<RedisValueType> type(K key) {
return super._type(key)
.map(this::decodeRedisType);
}
@Override
public Uni<Integer> unlink(K... keys) {
return super._unlink(keys)
.map(Response::toInteger);
}
}
| ReactiveKeyCommandsImpl |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTerms.java | {
"start": 638,
"end": 767
} | interface ____ extends MultiBucketsAggregation {
/**
* A bucket that is associated with a single term
*/
| RareTerms |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/annotation/DataTypeHint.java | {
"start": 10205,
"end": 10990
} | class ____ {@code "org.joda.time"}. Some classes might be handled as structured
* types on a best effort basis but others will be RAW data types if necessary.
*
* <p>By default, the pattern list is empty which means that an exception is thrown for unmapped
* types. This is helpful to identify and fix faulty implementations. It is generally
* recommended to use SQL-like types instead of enabling RAW opaque types.
*
* <p>If RAW types cannot be avoided, this parameter should be used to enabled them only in
* designated areas (i.e., within package prefixes) in order to not swallow all errors.
*
* <p>This parameter has lower precedence than {@link #allowRawGlobally()} which would globally
* allow RAW types in the annotated | prefix |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/callbacks/returning/Node.java | {
"start": 290,
"end": 1601
} | class ____ {
private Node parent;
private String name;
private List<Node> children;
private List<Attribute> attributes;
public Node() {
// default constructor for MapStruct
}
public Node(String name) {
this.name = name;
this.children = new ArrayList<>();
this.attributes = new ArrayList<>();
}
public Node getParent() {
return parent;
}
public void setParent(Node parent) {
this.parent = parent;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<Node> getChildren() {
return children;
}
public void setChildren(List<Node> children) {
this.children = children;
}
public void addChild(Node node) {
children.add( node );
node.setParent( this );
}
public List<Attribute> getAttributes() {
return attributes;
}
public void setAttributes(List<Attribute> attributes) {
this.attributes = attributes;
}
public void addAttribute(Attribute attribute) {
attributes.add( attribute );
attribute.setNode( this );
}
@Override
public String toString() {
return "Node [name=" + name + "]";
}
}
| Node |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.