language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ext/jdk8/PolymorphicOptionalTest.java | {
"start": 774,
"end": 1397
} | class ____ implements Contained { }
private final ObjectMapper MAPPER = newJsonMapper();
// [datatype-jdk8#14]
@Test
public void testPolymorphic14() throws Exception
{
final Container dto = new Container();
dto.contained = Optional.of(new ContainedImpl());
final String json = MAPPER.writeValueAsString(dto);
final Container fromJson = MAPPER.readValue(json, Container.class);
assertNotNull(fromJson.contained);
assertTrue(fromJson.contained.isPresent());
assertSame(ContainedImpl.class, fromJson.contained.get().getClass());
}
}
| ContainedImpl |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/NetworkClient.java | {
"start": 52387,
"end": 61578
} | class ____ implements MetadataUpdater {
/* the current cluster metadata */
private final Metadata metadata;
// Defined if there is a request in progress, null otherwise
private InProgressData inProgress;
/*
* The time in wall-clock milliseconds when we started attempts to fetch metadata. If empty,
* metadata has not been requested. This is the start time based on which rebootstrap is
* triggered if metadata is not obtained for the configured rebootstrap trigger interval.
* Set to Optional.of(0L) to force rebootstrap immediately.
*/
private Optional<Long> metadataAttemptStartMs = Optional.empty();
DefaultMetadataUpdater(Metadata metadata) {
this.metadata = metadata;
this.inProgress = null;
}
@Override
public List<Node> fetchNodes() {
return metadata.fetch().nodes();
}
@Override
public boolean isUpdateDue(long now) {
return !hasFetchInProgress() && this.metadata.timeToNextUpdate(now) == 0;
}
private boolean hasFetchInProgress() {
return inProgress != null;
}
@Override
public long maybeUpdate(long now) {
// should we update our metadata?
long timeToNextMetadataUpdate = metadata.timeToNextUpdate(now);
long waitForMetadataFetch = hasFetchInProgress() ? defaultRequestTimeoutMs : 0;
long metadataTimeout = Math.max(timeToNextMetadataUpdate, waitForMetadataFetch);
if (metadataTimeout > 0) {
return metadataTimeout;
}
if (metadataAttemptStartMs.isEmpty())
metadataAttemptStartMs = Optional.of(now);
// Beware that the behavior of this method and the computation of timeouts for poll() are
// highly dependent on the behavior of leastLoadedNode.
LeastLoadedNode leastLoadedNode = leastLoadedNode(now);
// Rebootstrap if needed and configured.
if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP
&& !leastLoadedNode.hasNodeAvailableOrConnectionReady()) {
rebootstrap(now);
leastLoadedNode = leastLoadedNode(now);
}
if (leastLoadedNode.node() == null) {
log.debug("Give up sending metadata request since no node is available");
return reconnectBackoffMs;
}
return maybeUpdate(now, leastLoadedNode.node());
}
@Override
public void handleServerDisconnect(long now, String destinationId, Optional<AuthenticationException> maybeFatalException) {
Cluster cluster = metadata.fetch();
// 'processDisconnection' generates warnings for misconfigured bootstrap server configuration
// resulting in 'Connection Refused' and misconfigured security resulting in authentication failures.
// The warning below handles the case where a connection to a broker was established, but was disconnected
// before metadata could be obtained.
if (cluster.isBootstrapConfigured()) {
int nodeId = Integer.parseInt(destinationId);
Node node = cluster.nodeById(nodeId);
if (node != null)
log.warn("Bootstrap broker {} disconnected", node);
}
// If we have a disconnect while an update is due, we treat it as a failed update
// so that we can backoff properly
if (isUpdateDue(now))
handleFailedRequest(now, Optional.empty());
maybeFatalException.ifPresent(metadata::fatalError);
// The disconnect may be the result of stale metadata, so request an update
metadata.requestUpdate(false);
}
@Override
public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) {
maybeFatalException.ifPresent(metadata::fatalError);
metadata.failedUpdate(now);
inProgress = null;
}
@Override
public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
// If any partition has leader with missing listeners, log up to ten of these partitions
// for diagnosing broker configuration issues.
// This could be a transient issue if listeners were added dynamically to brokers.
List<TopicPartition> missingListenerPartitions = response.topicMetadata().stream().flatMap(topicMetadata ->
topicMetadata.partitionMetadata().stream()
.filter(partitionMetadata -> partitionMetadata.error == Errors.LISTENER_NOT_FOUND)
.map(partitionMetadata -> new TopicPartition(topicMetadata.topic(), partitionMetadata.partition())))
.collect(Collectors.toList());
if (!missingListenerPartitions.isEmpty()) {
int count = missingListenerPartitions.size();
log.warn("{} partitions have leader brokers without a matching listener, including {}",
count, missingListenerPartitions.subList(0, Math.min(10, count)));
}
// Check if any topic's metadata failed to get updated
Map<String, Errors> errors = response.errors();
if (!errors.isEmpty())
log.warn("The metadata response from the cluster reported a recoverable issue with correlation id {} : {}", requestHeader.correlationId(), errors);
if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP && response.topLevelError() == Errors.REBOOTSTRAP_REQUIRED) {
log.info("Rebootstrap requested by server.");
initiateRebootstrap();
} else if (response.brokers().isEmpty()) {
// When talking to the startup phase of a broker, it is possible to receive an empty metadata set, which
// we should retry later.
log.trace("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId());
this.metadata.failedUpdate(now);
} else {
this.metadata.update(inProgress.requestVersion, response, inProgress.isPartialUpdate, now);
metadataAttemptStartMs = Optional.empty();
}
inProgress = null;
}
@Override
public boolean needsRebootstrap(long now, long rebootstrapTriggerMs) {
return metadataAttemptStartMs.filter(startMs -> now - startMs > rebootstrapTriggerMs).isPresent();
}
@Override
public void rebootstrap(long now) {
metadata.rebootstrap();
metadataAttemptStartMs = Optional.of(now);
}
@Override
public void close() {
this.metadata.close();
}
private void initiateRebootstrap() {
metadataAttemptStartMs = Optional.of(0L); // to force rebootstrap
}
/**
* Add a metadata request to the list of sends if we can make one
*/
private long maybeUpdate(long now, Node node) {
String nodeConnectionId = node.idString();
if (canSendRequest(nodeConnectionId, now)) {
Metadata.MetadataRequestAndVersion requestAndVersion = metadata.newMetadataRequestAndVersion(now);
MetadataRequest.Builder metadataRequest = requestAndVersion.requestBuilder;
log.debug("Sending metadata request {} to node {}", metadataRequest, node);
sendInternalMetadataRequest(metadataRequest, nodeConnectionId, now);
inProgress = new InProgressData(requestAndVersion.requestVersion, requestAndVersion.isPartialUpdate);
return defaultRequestTimeoutMs;
}
// If there's any connection establishment underway, wait until it completes. This prevents
// the client from unnecessarily connecting to additional nodes while a previous connection
// attempt has not been completed.
if (isAnyNodeConnecting()) {
// Strictly the timeout we should return here is "connect timeout", but as we don't
// have such application level configuration, using reconnect backoff instead.
return reconnectBackoffMs;
}
if (connectionStates.canConnect(nodeConnectionId, now)) {
// We don't have a connection to this node right now, make one
log.debug("Initialize connection to node {} for sending metadata request", node);
initiateConnect(node, now);
return reconnectBackoffMs;
}
// connected, but can't send more OR connecting
// In either case, we just need to wait for a network event to let us know the selected
// connection might be usable again.
return Long.MAX_VALUE;
}
public | DefaultMetadataUpdater |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryDefaultInEnumSwitchTest.java | {
"start": 34817,
"end": 35218
} | enum ____ {
ONE,
TWO,
UNRECOGNIZED
}
boolean m(Case c) {
return switch (c) {
case ONE -> true;
case TWO -> false;
case UNRECOGNIZED -> throw new AssertionError();
};
}
}
""")
.doTest();
}
}
| Case |
java | bumptech__glide | annotation/compiler/test/src/test/resources/GlideExtensionWithTypeTest/ExtensionWithType.java | {
"start": 243,
"end": 475
} | class ____ {
private ExtensionWithType() {
// Utility class.
}
@NonNull
@GlideType(Number.class)
public static RequestBuilder<Number> asNumber(RequestBuilder<Number> builder) {
return builder;
}
}
| ExtensionWithType |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java | {
"start": 1605,
"end": 1692
} | class ____<K> {
/**
* Class for tracking use count of a name
*/
private | NameCache |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/EqlQueryRenderer.java | {
"start": 1473,
"end": 30733
} | class ____ extends EqlBaseVisitor<QueryTokenStream> {
/**
* Is this AST tree a {@literal subquery}?
*
* @return {@literal true} is the query is a subquery; {@literal false} otherwise.
*/
static boolean isSubquery(ParserRuleContext ctx) {
while (ctx != null) {
if (ctx instanceof EqlParser.SubqueryContext) {
return true;
}
if (ctx instanceof EqlParser.Update_statementContext || ctx instanceof EqlParser.Delete_statementContext) {
return false;
}
ctx = ctx.getParent();
}
return false;
}
/**
* Is this AST tree a {@literal set} query that has been added through {@literal UNION|INTERSECT|EXCEPT}?
*
* @return boolean
*/
static boolean isSetQuery(ParserRuleContext ctx) {
while (ctx != null) {
if (ctx instanceof EqlParser.Set_fuctionContext) {
return true;
}
ctx = ctx.getParent();
}
return false;
}
@Override
public QueryTokenStream visitStart(EqlParser.StartContext ctx) {
return visit(ctx.ql_statement());
}
@Override
public QueryTokenStream visitFrom_clause(EqlParser.From_clauseContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.FROM()));
builder.appendInline(visit(ctx.identification_variable_declaration()));
if (!ctx.identificationVariableDeclarationOrCollectionMemberDeclaration().isEmpty()) {
builder.append(TOKEN_COMMA);
}
builder.appendExpression(QueryTokenStream
.concat(ctx.identificationVariableDeclarationOrCollectionMemberDeclaration(), this::visit, TOKEN_COMMA));
return builder;
}
@Override
public QueryTokenStream visitIdentificationVariableDeclarationOrCollectionMemberDeclaration(
EqlParser.IdentificationVariableDeclarationOrCollectionMemberDeclarationContext ctx) {
if (ctx.subquery() != null) {
QueryRendererBuilder nested = QueryRenderer.builder();
nested.append(TOKEN_OPEN_PAREN);
nested.appendInline(visit(ctx.subquery()));
nested.append(TOKEN_CLOSE_PAREN);
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendExpression(nested);
if (ctx.AS() != null) {
builder.append(QueryTokens.expression(ctx.AS()));
}
if (ctx.identification_variable() != null) {
builder.appendExpression(visit(ctx.identification_variable()));
}
return builder;
}
return super.visitIdentificationVariableDeclarationOrCollectionMemberDeclaration(ctx);
}
@Override
public QueryTokenStream visitJoin_association_path_expression(EqlParser.Join_association_path_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.TREAT() == null) {
if (ctx.join_collection_valued_path_expression() != null) {
builder.appendExpression(visit(ctx.join_collection_valued_path_expression()));
} else if (ctx.join_single_valued_path_expression() != null) {
builder.appendExpression(visit(ctx.join_single_valued_path_expression()));
}
} else {
QueryRendererBuilder nested = QueryRenderer.builder();
if (ctx.join_collection_valued_path_expression() != null) {
nested.appendExpression(visit(ctx.join_collection_valued_path_expression()));
nested.append(QueryTokens.expression(ctx.AS()));
nested.appendExpression(visit(ctx.subtype()));
} else if (ctx.join_single_valued_path_expression() != null) {
nested.appendExpression(visit(ctx.join_single_valued_path_expression()));
nested.append(QueryTokens.expression(ctx.AS()));
nested.appendExpression(visit(ctx.subtype()));
}
builder.append(QueryTokens.token(ctx.TREAT()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(nested);
builder.append(TOKEN_CLOSE_PAREN);
}
return builder;
}
@Override
public QueryTokenStream visitJoin_collection_valued_path_expression(
EqlParser.Join_collection_valued_path_expressionContext ctx) {
List<ParseTree> items = new ArrayList<>(2 + ctx.single_valued_embeddable_object_field().size());
if (ctx.identification_variable() != null) {
items.add(ctx.identification_variable());
}
items.addAll(ctx.single_valued_embeddable_object_field());
items.add(ctx.collection_valued_field());
return QueryTokenStream.concat(items, this::visit, TOKEN_DOT);
}
@Override
public QueryTokenStream visitJoin_single_valued_path_expression(
EqlParser.Join_single_valued_path_expressionContext ctx) {
List<ParseTree> items = new ArrayList<>(2 + ctx.single_valued_embeddable_object_field().size());
if (ctx.identification_variable() != null) {
items.add(ctx.identification_variable());
}
items.addAll(ctx.single_valued_embeddable_object_field());
items.add(ctx.single_valued_object_field());
return QueryTokenStream.concat(items, this::visit, TOKEN_DOT);
}
@Override
public QueryTokenStream visitCollection_member_declaration(EqlParser.Collection_member_declarationContext ctx) {
QueryRendererBuilder nested = QueryRenderer.builder();
nested.append(QueryTokens.token(ctx.IN()));
nested.append(TOKEN_OPEN_PAREN);
nested.appendInline(visit(ctx.collection_valued_path_expression()));
nested.append(TOKEN_CLOSE_PAREN);
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendExpression(nested);
if (ctx.AS() != null) {
builder.append(QueryTokens.expression(ctx.AS()));
}
if (ctx.identification_variable() != null) {
builder.appendExpression(visit(ctx.identification_variable()));
}
return builder;
}
@Override
public QueryTokenStream visitQualified_identification_variable(
EqlParser.Qualified_identification_variableContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.map_field_identification_variable() != null) {
builder.append(visit(ctx.map_field_identification_variable()));
} else if (ctx.identification_variable() != null) {
builder.append(QueryTokens.expression(ctx.ENTRY()));
builder.append(TOKEN_OPEN_PAREN);
builder.append(visit(ctx.identification_variable()));
builder.append(TOKEN_CLOSE_PAREN);
}
return builder;
}
@Override
public QueryTokenStream visitMap_field_identification_variable(
EqlParser.Map_field_identification_variableContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.KEY() != null) {
builder.append(QueryTokens.token(ctx.KEY()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(visit(ctx.identification_variable()));
builder.append(TOKEN_CLOSE_PAREN);
} else if (ctx.VALUE() != null) {
builder.append(QueryTokens.token(ctx.VALUE()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(visit(ctx.identification_variable()));
builder.append(TOKEN_CLOSE_PAREN);
}
return builder;
}
@Override
public QueryTokenStream visitSingle_valued_path_expression(EqlParser.Single_valued_path_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.qualified_identification_variable() != null) {
builder.append(visit(ctx.qualified_identification_variable()));
} else if (ctx.qualified_identification_variable() != null) {
builder.append(QueryTokens.token(ctx.TREAT()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(visit(ctx.qualified_identification_variable()));
builder.append(QueryTokens.expression(ctx.AS()));
builder.appendInline(visit(ctx.subtype()));
builder.append(TOKEN_CLOSE_PAREN);
} else if (ctx.state_field_path_expression() != null) {
builder.append(visit(ctx.state_field_path_expression()));
} else if (ctx.single_valued_object_path_expression() != null) {
builder.append(visit(ctx.single_valued_object_path_expression()));
}
return builder;
}
@Override
public QueryTokenStream visitGeneral_subpath(EqlParser.General_subpathContext ctx) {
if (ctx.simple_subpath() != null) {
return visit(ctx.simple_subpath());
} else if (ctx.treated_subpath() != null) {
List<ParseTree> items = new ArrayList<>(1 + ctx.single_valued_object_field().size());
items.add(ctx.treated_subpath());
items.addAll(ctx.single_valued_object_field());
return QueryTokenStream.concat(items, this::visit, TOKEN_DOT);
}
return QueryTokenStream.empty();
}
@Override
public QueryTokenStream visitSimple_subpath(EqlParser.Simple_subpathContext ctx) {
List<ParseTree> items = new ArrayList<>(1 + ctx.single_valued_object_field().size());
items.add(ctx.general_identification_variable());
items.addAll(ctx.single_valued_object_field());
return QueryTokenStream.concat(items, this::visit, TOKEN_DOT);
}
@Override
public QueryTokenStream visitTreated_subpath(EqlParser.Treated_subpathContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
QueryRendererBuilder nested = QueryRenderer.builder();
nested.appendExpression(visit(ctx.general_subpath()));
nested.append(QueryTokens.expression(ctx.AS()));
nested.appendExpression(visit(ctx.subtype()));
builder.append(QueryTokens.token(ctx.TREAT()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(nested);
builder.append(TOKEN_CLOSE_PAREN);
return builder;
}
@Override
public QueryTokenStream visitState_field_path_expression(EqlParser.State_field_path_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendInline(visit(ctx.general_subpath()));
builder.append(TOKEN_DOT);
builder.appendInline(visit(ctx.state_field()));
return builder;
}
@Override
public QueryTokenStream visitSingle_valued_object_path_expression(
EqlParser.Single_valued_object_path_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendInline(visit(ctx.general_subpath()));
builder.append(TOKEN_DOT);
builder.appendInline(visit(ctx.single_valued_object_field()));
return builder;
}
@Override
public QueryTokenStream visitCollection_valued_path_expression(
EqlParser.Collection_valued_path_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendInline(visit(ctx.general_subpath()));
builder.append(TOKEN_DOT);
builder.appendInline(visit(ctx.collection_value_field()));
return builder;
}
@Override
public QueryTokenStream visitUpdate_clause(EqlParser.Update_clauseContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.UPDATE()));
builder.appendExpression(visit(ctx.entity_name()));
if (ctx.AS() != null) {
builder.append(QueryTokens.expression(ctx.AS()));
}
if (ctx.identification_variable() != null) {
builder.appendExpression(visit(ctx.identification_variable()));
}
builder.append(QueryTokens.expression(ctx.SET()));
builder.append(QueryTokenStream.concat(ctx.update_item(), this::visit, TOKEN_COMMA));
return builder;
}
@Override
public QueryTokenStream visitUpdate_item(EqlParser.Update_itemContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
List<ParseTree> items = new ArrayList<>(3 + ctx.single_valued_embeddable_object_field().size());
if (ctx.identification_variable() != null) {
items.add(ctx.identification_variable());
}
items.addAll(ctx.single_valued_embeddable_object_field());
if (ctx.state_field() != null) {
items.add(ctx.state_field());
} else if (ctx.single_valued_object_field() != null) {
items.add(ctx.single_valued_object_field());
}
builder.appendInline(QueryTokenStream.concat(items, this::visit, TOKEN_DOT));
builder.append(TOKEN_EQUALS);
builder.append(visit(ctx.new_value()));
return builder;
}
@Override
public QueryTokenStream visitSelect_clause(EqlParser.Select_clauseContext ctx) {
QueryRendererBuilder builder = prepareSelectClause(ctx);
builder.appendExpression(QueryTokenStream.concat(ctx.select_item(), this::visit, TOKEN_COMMA));
return builder;
}
QueryRendererBuilder prepareSelectClause(EqlParser.Select_clauseContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.SELECT()));
if (ctx.DISTINCT() != null) {
builder.append(QueryTokens.expression(ctx.DISTINCT()));
}
return builder;
}
@Override
public QueryTokenStream visitSelect_expression(EqlParser.Select_expressionContext ctx) {
if (ctx.identification_variable() != null && ctx.OBJECT() != null) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.token(ctx.OBJECT()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(visit(ctx.identification_variable()));
builder.append(TOKEN_CLOSE_PAREN);
return builder;
}
return super.visitSelect_expression(ctx);
}
@Override
public QueryTokenStream visitConstructor_expression(EqlParser.Constructor_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.NEW()));
builder.append(visit(ctx.constructor_name()));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(QueryTokenStream.concat(ctx.constructor_item(), this::visit, TOKEN_COMMA));
builder.append(TOKEN_CLOSE_PAREN);
return builder;
}
@Override
public QueryTokenStream visitAggregate_expression(EqlParser.Aggregate_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.AVG() != null || ctx.MAX() != null || ctx.MIN() != null || ctx.SUM() != null) {
if (ctx.AVG() != null) {
builder.append(QueryTokens.token(ctx.AVG()));
}
if (ctx.MAX() != null) {
builder.append(QueryTokens.token(ctx.MAX()));
}
if (ctx.MIN() != null) {
builder.append(QueryTokens.token(ctx.MIN()));
}
if (ctx.SUM() != null) {
builder.append(QueryTokens.token(ctx.SUM()));
}
builder.append(TOKEN_OPEN_PAREN);
if (ctx.DISTINCT() != null) {
builder.append(QueryTokens.expression(ctx.DISTINCT()));
}
builder.appendInline(visit(ctx.simple_select_expression()));
builder.append(TOKEN_CLOSE_PAREN);
} else if (ctx.COUNT() != null) {
builder.append(QueryTokens.token(ctx.COUNT()));
builder.append(TOKEN_OPEN_PAREN);
if (ctx.DISTINCT() != null) {
builder.append(QueryTokens.expression(ctx.DISTINCT()));
}
builder.appendInline(visit(ctx.simple_select_expression()));
builder.append(TOKEN_CLOSE_PAREN);
} else if (ctx.function_invocation() != null) {
builder.append(visit(ctx.function_invocation()));
}
return builder;
}
@Override
public QueryTokenStream visitGroupby_clause(EqlParser.Groupby_clauseContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.GROUP()));
builder.append(QueryTokens.expression(ctx.BY()));
builder.appendExpression(QueryTokenStream.concat(ctx.groupby_item(), this::visit, TOKEN_COMMA));
return builder;
}
@Override
public QueryTokenStream visitOrderby_clause(EqlParser.Orderby_clauseContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.ORDER()));
builder.append(QueryTokens.expression(ctx.BY()));
builder.append(QueryTokenStream.concat(ctx.orderby_item(), this::visit, TOKEN_COMMA));
return builder;
}
@Override
public QueryTokenStream visitSubquery_from_clause(EqlParser.Subquery_from_clauseContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.expression(ctx.FROM()));
builder.appendExpression(
QueryTokenStream.concat(ctx.subselect_identification_variable_declaration(), this::visit, TOKEN_COMMA));
return builder;
}
@Override
public QueryTokenStream visitConditional_primary(EqlParser.Conditional_primaryContext ctx) {
if (ctx.conditional_expression() != null) {
return QueryTokenStream.group(visit(ctx.conditional_expression()));
}
return super.visitConditional_primary(ctx);
}
@Override
public QueryTokenStream visitIn_expression(EqlParser.In_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.string_expression() != null) {
builder.appendExpression(visit(ctx.string_expression()));
}
if (ctx.type_discriminator() != null) {
builder.appendExpression(visit(ctx.type_discriminator()));
}
if (ctx.NOT() != null) {
builder.append(QueryTokens.expression(ctx.NOT()));
}
if (ctx.IN() != null) {
builder.append(QueryTokens.expression(ctx.IN()));
}
if (ctx.in_item() != null && !ctx.in_item().isEmpty()) {
builder.append(QueryTokenStream.group(QueryTokenStream.concat(ctx.in_item(), this::visit, TOKEN_COMMA)));
} else if (ctx.subquery() != null) {
builder.append(QueryTokenStream.group(visit(ctx.subquery())));
} else if (ctx.collection_valued_input_parameter() != null) {
builder.append(visit(ctx.collection_valued_input_parameter()));
}
return builder;
}
@Override
public QueryTokenStream visitExists_expression(EqlParser.Exists_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.NOT() != null) {
builder.append(QueryTokens.expression(ctx.NOT()));
}
builder.append(QueryTokens.expression(ctx.EXISTS()));
builder.append(QueryTokenStream.group(visit(ctx.subquery())));
return builder;
}
@Override
public QueryTokenStream visitAll_or_any_expression(EqlParser.All_or_any_expressionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.ALL() != null) {
builder.append(QueryTokens.expression(ctx.ALL()));
} else if (ctx.ANY() != null) {
builder.append(QueryTokens.expression(ctx.ANY()));
} else if (ctx.SOME() != null) {
builder.append(QueryTokens.expression(ctx.SOME()));
}
builder.append(QueryTokenStream.group(visit(ctx.subquery())));
return builder;
}
@Override
public QueryTokenStream visitArithmetic_factor(EqlParser.Arithmetic_factorContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.op != null) {
builder.append(QueryTokens.token(ctx.op));
}
builder.append(visit(ctx.arithmetic_primary()));
return builder;
}
@Override
public QueryTokenStream visitArithmetic_primary(EqlParser.Arithmetic_primaryContext ctx) {
if (ctx.arithmetic_expression() != null) {
return QueryTokenStream.group(visit(ctx.arithmetic_expression()));
} else if (ctx.subquery() != null) {
return QueryTokenStream.group(visit(ctx.subquery()));
}
return super.visitArithmetic_primary(ctx);
}
@Override
public QueryTokenStream visitString_expression(EqlParser.String_expressionContext ctx) {
if (ctx.subquery() != null) {
return QueryTokenStream.group(visit(ctx.subquery()));
}
return super.visitString_expression(ctx);
}
@Override
public QueryTokenStream visitDatetime_expression(EqlParser.Datetime_expressionContext ctx) {
if (ctx.subquery() != null) {
return QueryTokenStream.group(visit(ctx.subquery()));
}
return super.visitDatetime_expression(ctx);
}
@Override
public QueryTokenStream visitBoolean_expression(EqlParser.Boolean_expressionContext ctx) {
if (ctx.subquery() != null) {
return QueryTokenStream.group(visit(ctx.subquery()));
}
return super.visitBoolean_expression(ctx);
}
@Override
public QueryTokenStream visitEnum_expression(EqlParser.Enum_expressionContext ctx) {
if (ctx.subquery() != null) {
return QueryTokenStream.group(visit(ctx.subquery()));
}
return super.visitEnum_expression(ctx);
}
@Override
public QueryTokenStream visitType_discriminator(EqlParser.Type_discriminatorContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.general_identification_variable() != null) {
builder.append(visit(ctx.general_identification_variable()));
} else if (ctx.single_valued_object_path_expression() != null) {
builder.append(visit(ctx.single_valued_object_path_expression()));
} else if (ctx.input_parameter() != null) {
builder.append(visit(ctx.input_parameter()));
}
return QueryTokenStream.ofFunction(ctx.TYPE(), builder);
}
@Override
public QueryTokenStream visitFunctions_returning_numerics(EqlParser.Functions_returning_numericsContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.LENGTH() != null) {
return QueryTokenStream.ofFunction(ctx.LENGTH(), visit(ctx.string_expression(0)));
} else if (ctx.LOCATE() != null) {
builder.appendInline(visit(ctx.string_expression(0)));
builder.append(TOKEN_COMMA);
builder.appendInline(visit(ctx.string_expression(1)));
if (ctx.arithmetic_expression() != null) {
builder.append(TOKEN_COMMA);
builder.appendInline(visit(ctx.arithmetic_expression(0)));
}
return QueryTokenStream.ofFunction(ctx.LOCATE(), builder);
} else if (ctx.ABS() != null) {
return QueryTokenStream.ofFunction(ctx.ABS(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.CEILING() != null) {
return QueryTokenStream.ofFunction(ctx.CEILING(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.EXP() != null) {
return QueryTokenStream.ofFunction(ctx.EXP(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.FLOOR() != null) {
return QueryTokenStream.ofFunction(ctx.FLOOR(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.LN() != null) {
return QueryTokenStream.ofFunction(ctx.LN(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.SIGN() != null) {
return QueryTokenStream.ofFunction(ctx.SIGN(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.SQRT() != null) {
return QueryTokenStream.ofFunction(ctx.SQRT(), visit(ctx.arithmetic_expression(0)));
} else if (ctx.MOD() != null) {
builder.appendInline(visit(ctx.arithmetic_expression(0)));
builder.append(TOKEN_COMMA);
builder.appendInline(visit(ctx.arithmetic_expression(1)));
return QueryTokenStream.ofFunction(ctx.MOD(), builder);
} else if (ctx.POWER() != null) {
builder.appendInline(visit(ctx.arithmetic_expression(0)));
builder.append(TOKEN_COMMA);
builder.appendInline(visit(ctx.arithmetic_expression(1)));
return QueryTokenStream.ofFunction(ctx.POWER(), builder);
} else if (ctx.ROUND() != null) {
builder.appendInline(visit(ctx.arithmetic_expression(0)));
builder.append(TOKEN_COMMA);
builder.appendInline(visit(ctx.arithmetic_expression(1)));
return QueryTokenStream.ofFunction(ctx.ROUND(), builder);
} else if (ctx.SIZE() != null) {
return QueryTokenStream.ofFunction(ctx.SIZE(), visit(ctx.collection_valued_path_expression()));
} else if (ctx.INDEX() != null) {
return QueryTokenStream.ofFunction(ctx.INDEX(), visit(ctx.identification_variable()));
} else if (ctx.extract_datetime_field() != null) {
builder.append(visit(ctx.extract_datetime_field()));
}
return builder;
}
@Override
public QueryTokenStream visitFunctions_returning_strings(EqlParser.Functions_returning_stringsContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.CONCAT() != null) {
return QueryTokenStream.ofFunction(ctx.CONCAT(),
QueryTokenStream.concat(ctx.string_expression(), this::visit, TOKEN_COMMA));
} else if (ctx.SUBSTRING() != null) {
builder.append(visit(ctx.string_expression(0)));
builder.append(TOKEN_COMMA);
builder.appendInline(QueryTokenStream.concat(ctx.arithmetic_expression(), this::visit, TOKEN_COMMA));
return QueryTokenStream.ofFunction(ctx.SUBSTRING(), builder);
} else if (ctx.TRIM() != null) {
if (ctx.trim_specification() != null) {
builder.appendExpression(visit(ctx.trim_specification()));
}
if (ctx.trim_character() != null) {
builder.appendExpression(visit(ctx.trim_character()));
}
if (ctx.FROM() != null) {
builder.append(QueryTokens.expression(ctx.FROM()));
}
builder.append(visit(ctx.string_expression(0)));
return QueryTokenStream.ofFunction(ctx.TRIM(), builder);
} else if (ctx.LOWER() != null) {
return QueryTokenStream.ofFunction(ctx.LOWER(),
QueryTokenStream.concat(ctx.string_expression(), this::visit, TOKEN_COMMA));
} else if (ctx.UPPER() != null) {
return QueryTokenStream.ofFunction(ctx.UPPER(),
QueryTokenStream.concat(ctx.string_expression(), this::visit, TOKEN_COMMA));
} else if (ctx.LEFT() != null) {
builder.append(visit(ctx.string_expression(0)));
builder.append(TOKEN_COMMA);
builder.append(visit(ctx.arithmetic_expression(0)));
return QueryTokenStream.ofFunction(ctx.LEFT(), builder);
} else if (ctx.RIGHT() != null) {
builder.appendInline(visit(ctx.string_expression(0)));
builder.append(TOKEN_COMMA);
builder.append(visit(ctx.arithmetic_expression(0)));
return QueryTokenStream.ofFunction(ctx.RIGHT(), builder);
} else if (ctx.REPLACE() != null) {
return QueryTokenStream.ofFunction(ctx.REPLACE(),
QueryTokenStream.concat(ctx.string_expression(), this::visit, TOKEN_COMMA));
}
return builder;
}
@Override
public QueryTokenStream visitArithmetic_cast_function(EqlParser.Arithmetic_cast_functionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendExpression(visit(ctx.string_expression()));
if (ctx.AS() != null) {
builder.append(QueryTokens.expression(ctx.AS()));
}
builder.append(QueryTokens.token(ctx.f));
return QueryTokenStream.ofFunction(ctx.CAST(), builder);
}
@Override
public QueryTokenStream visitType_cast_function(EqlParser.Type_cast_functionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendExpression(visit(ctx.scalar_expression()));
if (ctx.AS() != null) {
builder.append(QueryTokens.expression(ctx.AS()));
}
builder.appendInline(visit(ctx.identification_variable()));
if (!CollectionUtils.isEmpty(ctx.numeric_literal())) {
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(QueryTokenStream.concat(ctx.numeric_literal(), this::visit, TOKEN_COMMA));
builder.append(TOKEN_CLOSE_PAREN);
}
return QueryTokenStream.ofFunction(ctx.CAST(), builder);
}
@Override
public QueryTokenStream visitString_cast_function(EqlParser.String_cast_functionContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
builder.appendExpression(visit(ctx.scalar_expression()));
if (ctx.AS() != null) {
builder.append(QueryTokens.expression(ctx.AS()));
}
builder.append(QueryTokens.token(ctx.STRING()));
return QueryTokenStream.ofFunction(ctx.CAST(), builder);
}
@Override
public QueryTokenStream visitFunction_invocation(EqlParser.Function_invocationContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.FUNCTION() != null) {
builder.append(QueryTokens.token(ctx.FUNCTION()));
} else if (ctx.identification_variable() != null) {
builder.appendInline(visit(ctx.identification_variable()));
}
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(visit(ctx.function_name()));
if (!ctx.function_arg().isEmpty()) {
builder.append(TOKEN_COMMA);
}
builder.appendInline(QueryTokenStream.concat(ctx.function_arg(), this::visit, TOKEN_COMMA));
builder.append(TOKEN_CLOSE_PAREN);
return builder;
}
@Override
public QueryTokenStream visitExtract_datetime_field(EqlParser.Extract_datetime_fieldContext ctx) {
QueryRendererBuilder nested = QueryRenderer.builder();
nested.appendExpression(visit(ctx.datetime_field()));
nested.append(QueryTokens.expression(ctx.FROM()));
nested.appendExpression(visit(ctx.datetime_expression()));
return QueryTokenStream.ofFunction(ctx.EXTRACT(), nested);
}
@Override
public QueryTokenStream visitExtract_datetime_part(EqlParser.Extract_datetime_partContext ctx) {
QueryRendererBuilder nested = QueryRenderer.builder();
nested.appendExpression(visit(ctx.datetime_part()));
nested.append(QueryTokens.expression(ctx.FROM()));
nested.appendExpression(visit(ctx.datetime_expression()));
return QueryTokenStream.ofFunction(ctx.EXTRACT(), nested);
}
@Override
public QueryTokenStream visitCoalesce_expression(EqlParser.Coalesce_expressionContext ctx) {
return QueryTokenStream.ofFunction(ctx.COALESCE(),
QueryTokenStream.concat(ctx.scalar_expression(), this::visit, TOKEN_COMMA));
}
@Override
public QueryTokenStream visitNullif_expression(EqlParser.Nullif_expressionContext ctx) {
return QueryTokenStream.ofFunction(ctx.NULLIF(),
QueryTokenStream.concat(ctx.scalar_expression(), this::visit, TOKEN_COMMA));
}
@Override
public QueryTokenStream visitInput_parameter(EqlParser.Input_parameterContext ctx) {
QueryRendererBuilder builder = QueryRenderer.builder();
if (ctx.INTLITERAL() != null) {
builder.append(TOKEN_QUESTION_MARK);
builder.append(QueryTokens.token(ctx.INTLITERAL()));
} else if (ctx.identification_variable() != null) {
builder.append(TOKEN_COLON);
builder.appendInline(visit(ctx.identification_variable()));
}
return builder;
}
@Override
public QueryTokenStream visitEntity_name(EqlParser.Entity_nameContext ctx) {
return QueryTokenStream.concat(ctx.reserved_word(), this::visit, TOKEN_DOT);
}
@Override
public QueryTokenStream visitChildren(RuleNode node) {
int childCount = node.getChildCount();
if (childCount == 1 && node.getChild(0) instanceof RuleContext t) {
return visit(t);
}
if (childCount == 1 && node.getChild(0) instanceof TerminalNode t) {
return QueryTokens.token(t);
}
return QueryTokenStream.concatExpressions(node, this::visit);
}
}
| EqlQueryRenderer |
java | google__guava | android/guava-tests/test/com/google/common/util/concurrent/CycleDetectingLockFactoryTest.java | {
"start": 14599,
"end": 15991
} | class ____ extends Thread {
final CountDownLatch locked = new CountDownLatch(1);
final CountDownLatch finishLatch = new CountDownLatch(1);
final Lock lock;
LockingThread(Lock lock) {
this.lock = lock;
}
@Override
public void run() {
lock.lock();
try {
locked.countDown();
finishLatch.await(1, MINUTES);
} catch (InterruptedException e) {
fail(e.toString());
} finally {
lock.unlock();
}
}
void waitUntilHoldingLock() throws InterruptedException {
locked.await(1, MINUTES);
}
void releaseLockAndFinish() throws InterruptedException {
finishLatch.countDown();
this.join(10000);
assertFalse(this.isAlive());
}
}
public void testReentrantReadWriteLock_implDoesNotExposeShadowedLocks() {
assertEquals(
"Unexpected number of public methods in ReentrantReadWriteLock. "
+ "The correctness of CycleDetectingReentrantReadWriteLock depends on "
+ "the fact that the shadowed ReadLock and WriteLock are never used or "
+ "exposed by the superclass implementation. If the implementation has "
+ "changed, the code must be re-inspected to ensure that the "
+ "assumption is still valid.",
24,
ReentrantReadWriteLock.class.getMethods().length);
}
private | LockingThread |
java | micronaut-projects__micronaut-core | inject-groovy/src/main/groovy/io/micronaut/ast/groovy/scan/AnnotationClassReader.java | {
"start": 3888,
"end": 4061
} | class ____ be parsed. <i>The content of this array must not be
* modified. This field is intended for {@link Attribute} sub classes, and
* is normally not needed by | to |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/format/MapEntryFormatTest.java | {
"start": 1672,
"end": 2098
} | class ____ {
@JsonInclude(value=JsonInclude.Include.NON_EMPTY,
content=JsonInclude.Include.NON_NULL)
public Map.Entry<String,String> entry;
public EntryWithNullWrapper(String key, String value) {
HashMap<String,String> map = new HashMap<>();
map.put(key, value);
entry = map.entrySet().iterator().next();
}
}
static | EntryWithNullWrapper |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/example/java/WordCountSimplePOJOITCase.java | {
"start": 3501,
"end": 4076
} | class ____ implements FlatMapFunction<String, WC> {
private static final long serialVersionUID = 1L;
@Override
public void flatMap(String value, Collector<WC> out) {
// normalize and split the line
String[] tokens = value.toLowerCase().split("\\W+");
// emit the pairs
for (String token : tokens) {
if (token.length() > 0) {
out.collect(new WC(token, 1));
}
}
}
}
/** POJO with word and count. */
public static | Tokenizer |
java | google__auto | value/src/main/java/com/google/auto/value/processor/AutoValueishProcessor.java | {
"start": 48456,
"end": 50413
} | class ____ should not be implicitly copied. Doing so can mislead
// static analysis or metaprogramming tooling that reads the data
// contained in these annotations.
//
// It may be surprising to see AutoValue classes written in Kotlin
// when they could be written as Kotlin data classes, but this can
// come up in cases where consumers rely on AutoValue features or
// extensions that are not available in data classes.
//
// See: https://github.com/google/auto/issues/1087
//
.add(ClassNames.KOTLIN_METADATA_NAME)
.build();
return copyAnnotations(type, type, excludedAnnotations);
} else {
return ImmutableList.of();
}
}
/** Implements the semantics of {@code AutoValue.CopyAnnotations}; see its javadoc. */
ImmutableList<String> copyAnnotations(
Element autoValueType, Element typeOrMethod, Set<String> excludedAnnotations) {
ImmutableList<AnnotationMirror> annotationsToCopy =
annotationsToCopy(autoValueType, typeOrMethod, excludedAnnotations);
return annotationStrings(annotationsToCopy);
}
/**
* Returns the contents of the {@code AutoValue.CopyAnnotations.exclude} element, as a set of
* {@code TypeMirror} where each type is an annotation type.
*/
private static Set<TypeMirror> getExcludedAnnotationTypes(Element element) {
Optional<AnnotationMirror> maybeAnnotation =
getAnnotationMirror(element, COPY_ANNOTATIONS_NAME);
if (!maybeAnnotation.isPresent()) {
return ImmutableSet.of();
}
@SuppressWarnings("unchecked")
List<AnnotationValue> excludedClasses =
(List<AnnotationValue>) getAnnotationValue(maybeAnnotation.get(), "exclude").getValue();
// It turns out that if you write `@AutoValue.CopyAnnotations(exclude = {Missing.class})`, where
// `Missing` is a | and |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/bundle/MapBundleFunction.java | {
"start": 1140,
"end": 1396
} | interface ____ map bundle processing.
*
* @param <K> The type of the key in the bundle map
* @param <V> The type of the value in the bundle map
* @param <IN> Type of the input elements.
* @param <OUT> Type of the returned elements.
*/
public abstract | for |
java | apache__kafka | server-common/src/test/java/org/apache/kafka/timeline/SnapshottableHashTableTest.java | {
"start": 1880,
"end": 13880
} | class ____ implements SnapshottableHashTable.ElementWithStartEpoch {
private final int i;
private final char j;
private long startEpoch = Long.MAX_VALUE;
TestElement(int i, char j) {
this.i = i;
this.j = j;
}
@Override
public void setStartEpoch(long startEpoch) {
this.startEpoch = startEpoch;
}
@Override
public long startEpoch() {
return startEpoch;
}
@Override
public int hashCode() {
return i;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof TestElement other)) {
return false;
}
return other.i == i;
}
@Override
public String toString() {
return String.format("E_%d%c(%s)", i, j, System.identityHashCode(this));
}
}
private static final TestElement E_1A = new TestElement(1, 'A');
private static final TestElement E_1B = new TestElement(1, 'B');
private static final TestElement E_2A = new TestElement(2, 'A');
private static final TestElement E_3A = new TestElement(3, 'A');
private static final TestElement E_3B = new TestElement(3, 'B');
@Test
public void testEmptyTable() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertEquals(0, table.snapshottableSize(Long.MAX_VALUE));
}
@Test
public void testDeleteOnEmptyDeltaTable() {
// A simple test case to validate the behavior of the TimelineHashSet
// when the deltaTable for a snapshot is null
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
TimelineHashSet<String> set = new TimelineHashSet<>(registry, 5);
registry.getOrCreateSnapshot(100);
set.add("bar");
registry.getOrCreateSnapshot(200);
set.add("baz");
// The deltatable of epoch 200 is null, it should not throw exception while reverting (deltatable merge)
registry.revertToSnapshot(100);
assertTrue(set.isEmpty());
set.add("foo");
registry.getOrCreateSnapshot(300);
// After reverting to epoch 100, "bar" is not existed anymore
set.remove("bar");
// No deltatable merging is needed because nothing change in snapshot epoch 300
registry.revertToSnapshot(100);
assertTrue(set.isEmpty());
set.add("qux");
registry.getOrCreateSnapshot(400);
assertEquals(1, set.size());
set.add("fred");
set.add("thud");
registry.getOrCreateSnapshot(500);
assertEquals(3, set.size());
// remove the value in epoch 101(after epoch 100), it'll create an entry in deltatable in the snapshot of epoch 500 for the deleted value in epoch 101
set.remove("qux");
assertEquals(2, set.size());
// When reverting to snapshot of epoch 400, we'll merge the deltatable in epoch 500 with the one in epoch 400.
// The deltatable in epoch 500 has an entry created above, but the deltatable in epoch 400 is null.
// It should not throw exception while reverting (deltatable merge)
registry.revertToSnapshot(400);
// After reverting, the deltatable in epoch 500 should merge to the current epoch
assertEquals(1, set.size());
// When reverting to epoch 100, the deltatable in epoch 400 won't be merged because the entry change is epoch 101(after epoch 100)
registry.revertToSnapshot(100);
assertTrue(set.isEmpty());
}
@Test
public void testAddAndRemove() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertNull(table.snapshottableAddOrReplace(E_1B));
assertEquals(1, table.snapshottableSize(Long.MAX_VALUE));
registry.getOrCreateSnapshot(0);
assertSame(E_1B, table.snapshottableAddOrReplace(E_1A));
assertSame(E_1B, table.snapshottableGet(E_1A, 0));
assertSame(E_1A, table.snapshottableGet(E_1A, Long.MAX_VALUE));
assertNull(table.snapshottableAddOrReplace(E_2A));
assertNull(table.snapshottableAddOrReplace(E_3A));
assertEquals(3, table.snapshottableSize(Long.MAX_VALUE));
assertEquals(1, table.snapshottableSize(0));
registry.getOrCreateSnapshot(1);
assertEquals(E_1A, table.snapshottableRemove(E_1B));
assertEquals(E_2A, table.snapshottableRemove(E_2A));
assertEquals(E_3A, table.snapshottableRemove(E_3A));
assertEquals(0, table.snapshottableSize(Long.MAX_VALUE));
assertEquals(1, table.snapshottableSize(0));
assertEquals(3, table.snapshottableSize(1));
registry.deleteSnapshot(0);
assertEquals("No in-memory snapshot for epoch 0. Snapshot epochs are: 1",
assertThrows(RuntimeException.class, () ->
table.snapshottableSize(0)).getMessage());
registry.deleteSnapshot(1);
assertEquals(0, table.snapshottableSize(Long.MAX_VALUE));
}
@Test
public void testIterateOverSnapshot() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertTrue(table.snapshottableAddUnlessPresent(E_1B));
assertFalse(table.snapshottableAddUnlessPresent(E_1A));
assertTrue(table.snapshottableAddUnlessPresent(E_2A));
assertTrue(table.snapshottableAddUnlessPresent(E_3A));
registry.getOrCreateSnapshot(0);
assertIteratorYields(table.snapshottableIterator(0), E_1B, E_2A, E_3A);
assertEquals(E_1B, table.snapshottableRemove(E_1B));
assertIteratorYields(table.snapshottableIterator(0), E_1B, E_2A, E_3A);
assertNull(table.snapshottableRemove(E_1A));
assertIteratorYields(table.snapshottableIterator(Long.MAX_VALUE), E_2A, E_3A);
assertEquals(E_2A, table.snapshottableRemove(E_2A));
assertEquals(E_3A, table.snapshottableRemove(E_3A));
assertIteratorYields(table.snapshottableIterator(0), E_1B, E_2A, E_3A);
}
@Test
public void testIterateOverSnapshotWhileExpandingTable() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertNull(table.snapshottableAddOrReplace(E_1A));
registry.getOrCreateSnapshot(0);
Iterator<TestElement> iter = table.snapshottableIterator(0);
assertTrue(table.snapshottableAddUnlessPresent(E_2A));
assertTrue(table.snapshottableAddUnlessPresent(E_3A));
assertIteratorYields(iter, E_1A);
}
@Test
public void testIterateOverSnapshotWhileDeletingAndReplacing() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertNull(table.snapshottableAddOrReplace(E_1A));
assertNull(table.snapshottableAddOrReplace(E_2A));
assertNull(table.snapshottableAddOrReplace(E_3A));
assertEquals(E_1A, table.snapshottableRemove(E_1A));
assertNull(table.snapshottableAddOrReplace(E_1B));
registry.getOrCreateSnapshot(0);
Iterator<TestElement> iter = table.snapshottableIterator(0);
List<TestElement> iterElements = new ArrayList<>();
iterElements.add(iter.next());
assertEquals(E_2A, table.snapshottableRemove(E_2A));
assertEquals(E_3A, table.snapshottableAddOrReplace(E_3B));
iterElements.add(iter.next());
assertEquals(E_1B, table.snapshottableRemove(E_1B));
iterElements.add(iter.next());
assertFalse(iter.hasNext());
assertIteratorYields(iterElements.iterator(), E_1B, E_2A, E_3A);
}
@Test
public void testRevert() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertNull(table.snapshottableAddOrReplace(E_1A));
assertNull(table.snapshottableAddOrReplace(E_2A));
assertNull(table.snapshottableAddOrReplace(E_3A));
registry.getOrCreateSnapshot(0);
assertEquals(E_1A, table.snapshottableAddOrReplace(E_1B));
assertEquals(E_3A, table.snapshottableAddOrReplace(E_3B));
registry.getOrCreateSnapshot(1);
assertEquals(3, table.snapshottableSize(Long.MAX_VALUE));
assertIteratorYields(table.snapshottableIterator(Long.MAX_VALUE), E_1B, E_2A, E_3B);
table.snapshottableRemove(E_1B);
table.snapshottableRemove(E_2A);
table.snapshottableRemove(E_3B);
assertEquals(0, table.snapshottableSize(Long.MAX_VALUE));
assertEquals(3, table.snapshottableSize(0));
assertEquals(3, table.snapshottableSize(1));
registry.revertToSnapshot(0);
assertIteratorYields(table.snapshottableIterator(Long.MAX_VALUE), E_1A, E_2A, E_3A);
}
@Test
public void testReset() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 1);
assertNull(table.snapshottableAddOrReplace(E_1A));
assertNull(table.snapshottableAddOrReplace(E_2A));
assertNull(table.snapshottableAddOrReplace(E_3A));
registry.getOrCreateSnapshot(0);
assertEquals(E_1A, table.snapshottableAddOrReplace(E_1B));
assertEquals(E_3A, table.snapshottableAddOrReplace(E_3B));
registry.getOrCreateSnapshot(1);
registry.reset();
assertEquals(List.of(), registry.epochsList());
// Check that the table is empty
assertIteratorYields(table.snapshottableIterator(Long.MAX_VALUE));
}
@Test
public void testIteratorAtOlderEpoch() {
SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
SnapshottableHashTable<TestElement> table =
new SnapshottableHashTable<>(registry, 4);
assertNull(table.snapshottableAddOrReplace(E_3B));
registry.getOrCreateSnapshot(0);
assertNull(table.snapshottableAddOrReplace(E_1A));
registry.getOrCreateSnapshot(1);
assertEquals(E_1A, table.snapshottableAddOrReplace(E_1B));
registry.getOrCreateSnapshot(2);
assertEquals(E_1B, table.snapshottableRemove(E_1B));
assertIteratorYields(table.snapshottableIterator(1), E_3B, E_1A);
}
/**
* Assert that the given iterator contains the given elements, in any order.
* We compare using reference equality here, rather than object equality.
*/
private static void assertIteratorYields(Iterator<?> iter,
Object... expected) {
IdentityHashMap<Object, Boolean> remaining = new IdentityHashMap<>();
for (Object object : expected) {
remaining.put(object, true);
}
List<Object> extraObjects = new ArrayList<>();
while (iter.hasNext()) {
Object object = iter.next();
assertNotNull(object);
if (remaining.remove(object) == null) {
extraObjects.add(object);
}
}
if (!extraObjects.isEmpty() || !remaining.isEmpty()) {
throw new RuntimeException("Found extra object(s): [" + extraObjects.stream().map(Object::toString).collect(Collectors.joining(", ")) +
"] and didn't find object(s): [" + remaining.keySet().stream().map(Object::toString).collect(Collectors.joining(", ")) + "]");
}
}
}
| TestElement |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ComparableTypeTest.java | {
"start": 856,
"end": 1274
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(ComparableType.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"ComparableTypePositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.io.Serializable;
import java.util.Comparator;
public | ComparableTypeTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java | {
"start": 1368,
"end": 2066
} | class ____ extends BucketCollector {
/** Sole constructor. */
public DeferringBucketCollector() {}
/** Set the deferred collectors. */
public abstract void setDeferredCollector(Iterable<BucketCollector> deferredCollectors);
/**
* Replay the deferred hits on the selected buckets.
*/
public abstract void prepareSelectedBuckets(LongArray selectedBuckets) throws IOException;
/**
* Wrap the provided aggregator so that it behaves (almost) as if it had
* been collected directly.
*/
public Aggregator wrap(final Aggregator in, BigArrays bigArrays) {
return new WrappedAggregator(in);
}
protected static | DeferringBucketCollector |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/LazyLoadBalancer.java | {
"start": 3884,
"end": 4180
} | class ____ extends LoadBalancer {
@Override
public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) {
return Status.OK;
}
@Override
public void handleNameResolutionError(Status error) {}
@Override
public void shutdown() {}
}
}
| NoopLoadBalancer |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java | {
"start": 8400,
"end": 20220
} | class ____ implements ElasticsearchClient {
protected final ElasticsearchClient client;
public ClusterAdminClient(ElasticsearchClient client) {
this.client = client;
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse> ActionFuture<Response> execute(
ActionType<Response> action,
Request request
) {
return client.execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse> void execute(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
) {
client.execute(action, request, listener);
}
@Override
public ThreadPool threadPool() {
return client.threadPool();
}
public ActionFuture<ClusterHealthResponse> health(final ClusterHealthRequest request) {
return execute(TransportClusterHealthAction.TYPE, request);
}
public void health(final ClusterHealthRequest request, final ActionListener<ClusterHealthResponse> listener) {
execute(TransportClusterHealthAction.TYPE, request, listener);
}
public ClusterHealthRequestBuilder prepareHealth(TimeValue masterNodeTimeout, String... indices) {
return new ClusterHealthRequestBuilder(this, masterNodeTimeout).setIndices(indices);
}
public ActionFuture<ClusterStateResponse> state(final ClusterStateRequest request) {
return execute(ClusterStateAction.INSTANCE, request);
}
public void state(final ClusterStateRequest request, final ActionListener<ClusterStateResponse> listener) {
execute(ClusterStateAction.INSTANCE, request, listener);
}
public ClusterStateRequestBuilder prepareState(TimeValue masterNodeTimeout) {
return new ClusterStateRequestBuilder(this, masterNodeTimeout);
}
public ActionFuture<ClusterUpdateSettingsResponse> updateSettings(final ClusterUpdateSettingsRequest request) {
return execute(ClusterUpdateSettingsAction.INSTANCE, request);
}
public void updateSettings(final ClusterUpdateSettingsRequest request, final ActionListener<ClusterUpdateSettingsResponse> listener) {
execute(ClusterUpdateSettingsAction.INSTANCE, request, listener);
}
public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings(TimeValue masterNodeTimeout, TimeValue ackTimeout) {
return new ClusterUpdateSettingsRequestBuilder(this, masterNodeTimeout, ackTimeout);
}
public ActionFuture<NodesInfoResponse> nodesInfo(final NodesInfoRequest request) {
return execute(TransportNodesInfoAction.TYPE, request);
}
public void nodesInfo(final NodesInfoRequest request, final ActionListener<NodesInfoResponse> listener) {
execute(TransportNodesInfoAction.TYPE, request, listener);
}
public NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds) {
return new NodesInfoRequestBuilder(this, nodesIds);
}
public void clusterStats(ClusterStatsRequest request, ActionListener<ClusterStatsResponse> listener) {
execute(TransportClusterStatsAction.TYPE, request, listener);
}
public ClusterStatsRequestBuilder prepareClusterStats() {
return new ClusterStatsRequestBuilder(this);
}
public ClusterStatsRequestBuilder prepareClusterStats(boolean isCPS) {
return new ClusterStatsRequestBuilder(this, isCPS);
}
public ActionFuture<NodesStatsResponse> nodesStats(final NodesStatsRequest request) {
return execute(TransportNodesStatsAction.TYPE, request);
}
public void nodesStats(final NodesStatsRequest request, final ActionListener<NodesStatsResponse> listener) {
execute(TransportNodesStatsAction.TYPE, request, listener);
}
public NodesStatsRequestBuilder prepareNodesStats(String... nodesIds) {
return new NodesStatsRequestBuilder(this, nodesIds);
}
public ActionFuture<NodesCapabilitiesResponse> nodesCapabilities(final NodesCapabilitiesRequest request) {
return execute(TransportNodesCapabilitiesAction.TYPE, request);
}
public void nodesCapabilities(final NodesCapabilitiesRequest request, final ActionListener<NodesCapabilitiesResponse> listener) {
execute(TransportNodesCapabilitiesAction.TYPE, request, listener);
}
public void nodesUsage(final NodesUsageRequest request, final ActionListener<NodesUsageResponse> listener) {
execute(TransportNodesUsageAction.TYPE, request, listener);
}
public ActionFuture<ListTasksResponse> listTasks(final ListTasksRequest request) {
return execute(TransportListTasksAction.TYPE, request);
}
public void listTasks(final ListTasksRequest request, final ActionListener<ListTasksResponse> listener) {
execute(TransportListTasksAction.TYPE, request, listener);
}
public ListTasksRequestBuilder prepareListTasks(String... nodesIds) {
return new ListTasksRequestBuilder(this).setNodesIds(nodesIds);
}
public ActionFuture<GetTaskResponse> getTask(final GetTaskRequest request) {
return execute(TransportGetTaskAction.TYPE, request);
}
public void getTask(final GetTaskRequest request, final ActionListener<GetTaskResponse> listener) {
execute(TransportGetTaskAction.TYPE, request, listener);
}
public GetTaskRequestBuilder prepareGetTask(String taskId) {
return prepareGetTask(new TaskId(taskId));
}
public GetTaskRequestBuilder prepareGetTask(TaskId taskId) {
return new GetTaskRequestBuilder(this).setTaskId(taskId);
}
public ActionFuture<ListTasksResponse> cancelTasks(CancelTasksRequest request) {
return execute(TransportCancelTasksAction.TYPE, request);
}
public void cancelTasks(CancelTasksRequest request, ActionListener<ListTasksResponse> listener) {
execute(TransportCancelTasksAction.TYPE, request, listener);
}
public CancelTasksRequestBuilder prepareCancelTasks(String... nodesIds) {
return new CancelTasksRequestBuilder(this).setNodesIds(nodesIds);
}
public void putRepository(PutRepositoryRequest request, ActionListener<AcknowledgedResponse> listener) {
execute(TransportPutRepositoryAction.TYPE, request, listener);
}
public PutRepositoryRequestBuilder preparePutRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) {
return new PutRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name);
}
public void deleteRepository(DeleteRepositoryRequest request, ActionListener<AcknowledgedResponse> listener) {
execute(TransportDeleteRepositoryAction.TYPE, request, listener);
}
public DeleteRepositoryRequestBuilder prepareDeleteRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) {
return new DeleteRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name);
}
public void getRepositories(GetRepositoriesRequest request, ActionListener<GetRepositoriesResponse> listener) {
execute(GetRepositoriesAction.INSTANCE, request, listener);
}
public GetRepositoriesRequestBuilder prepareGetRepositories(TimeValue masterNodeTimeout, String... name) {
return new GetRepositoriesRequestBuilder(this, masterNodeTimeout, name);
}
public CleanupRepositoryRequestBuilder prepareCleanupRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String repository) {
return new CleanupRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, repository);
}
public void cleanupRepository(CleanupRepositoryRequest request, ActionListener<CleanupRepositoryResponse> listener) {
execute(TransportCleanupRepositoryAction.TYPE, request, listener);
}
public void verifyRepository(VerifyRepositoryRequest request, ActionListener<VerifyRepositoryResponse> listener) {
execute(VerifyRepositoryAction.INSTANCE, request, listener);
}
public VerifyRepositoryRequestBuilder prepareVerifyRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) {
return new VerifyRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name);
}
public ActionFuture<CreateSnapshotResponse> createSnapshot(CreateSnapshotRequest request) {
return execute(TransportCreateSnapshotAction.TYPE, request);
}
public void createSnapshot(CreateSnapshotRequest request, ActionListener<CreateSnapshotResponse> listener) {
execute(TransportCreateSnapshotAction.TYPE, request, listener);
}
public CreateSnapshotRequestBuilder prepareCreateSnapshot(TimeValue masterNodeTimeout, String repository, String name) {
return new CreateSnapshotRequestBuilder(this, masterNodeTimeout, repository, name);
}
public CloneSnapshotRequestBuilder prepareCloneSnapshot(TimeValue masterNodeTimeout, String repository, String source, String target) {
return new CloneSnapshotRequestBuilder(this, masterNodeTimeout, repository, source, target);
}
public void cloneSnapshot(CloneSnapshotRequest request, ActionListener<AcknowledgedResponse> listener) {
execute(TransportCloneSnapshotAction.TYPE, request, listener);
}
public void getSnapshots(GetSnapshotsRequest request, ActionListener<GetSnapshotsResponse> listener) {
execute(TransportGetSnapshotsAction.TYPE, request, listener);
}
public GetSnapshotsRequestBuilder prepareGetSnapshots(TimeValue masterNodeTimeout, String... repositories) {
return new GetSnapshotsRequestBuilder(this, masterNodeTimeout, repositories);
}
public void deleteSnapshot(DeleteSnapshotRequest request, ActionListener<AcknowledgedResponse> listener) {
execute(TransportDeleteSnapshotAction.TYPE, request, listener);
}
public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(TimeValue masterNodeTimeout, String repository, String... names) {
return new DeleteSnapshotRequestBuilder(this, masterNodeTimeout, repository, names);
}
public ActionFuture<RestoreSnapshotResponse> restoreSnapshot(RestoreSnapshotRequest request) {
return execute(TransportRestoreSnapshotAction.TYPE, request);
}
public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener<RestoreSnapshotResponse> listener) {
execute(TransportRestoreSnapshotAction.TYPE, request, listener);
}
public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(TimeValue masterNodeTimeout, String repository, String snapshot) {
return new RestoreSnapshotRequestBuilder(this, masterNodeTimeout, repository, snapshot);
}
public void snapshotsStatus(SnapshotsStatusRequest request, ActionListener<SnapshotsStatusResponse> listener) {
execute(TransportSnapshotsStatusAction.TYPE, request, listener);
}
public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeTimeout, String repository) {
return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout, repository);
}
public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeTimeout) {
return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout);
}
public void simulatePipeline(SimulatePipelineRequest request, ActionListener<SimulatePipelineResponse> listener) {
execute(SimulatePipelineAction.INSTANCE, request, listener);
}
public ActionFuture<SimulatePipelineResponse> simulatePipeline(SimulatePipelineRequest request) {
return execute(SimulatePipelineAction.INSTANCE, request);
}
public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType) {
return new SimulatePipelineRequestBuilder(this, source, xContentType);
}
}
| ClusterAdminClient |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileInputFormat.java | {
"start": 1959,
"end": 5353
} | class ____ extends MultiFileInputFormat<Text, Text> {
@Override
public RecordReader<Text,Text> getRecordReader(InputSplit split, JobConf job
, Reporter reporter) throws IOException {
return null;
}
}
private Path initFiles(FileSystem fs, int numFiles, int numBytes) throws IOException{
Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
Path multiFileDir = new Path(dir, "test.multifile");
fs.delete(multiFileDir, true);
fs.mkdirs(multiFileDir);
LOG.info("Creating " + numFiles + " file(s) in " + multiFileDir);
for(int i=0; i<numFiles ;i++) {
Path path = new Path(multiFileDir, "file_" + i);
FSDataOutputStream out = fs.create(path);
if (numBytes == -1) {
numBytes = rand.nextInt(MAX_BYTES);
}
for(int j=0; j< numBytes; j++) {
out.write(rand.nextInt());
}
out.close();
if(LOG.isDebugEnabled()) {
LOG.debug("Created file " + path + " with length " + numBytes);
}
lengths.put(path.getName(), new Long(numBytes));
}
FileInputFormat.setInputPaths(job, multiFileDir);
return multiFileDir;
}
@Test
public void testFormat() throws IOException {
LOG.info("Test started");
LOG.info("Max split count = " + MAX_SPLIT_COUNT);
LOG.info("Split count increment = " + SPLIT_COUNT_INCR);
LOG.info("Max bytes per file = " + MAX_BYTES);
LOG.info("Max number of files = " + MAX_NUM_FILES);
LOG.info("Number of files increment = " + NUM_FILES_INCR);
MultiFileInputFormat<Text,Text> format = new DummyMultiFileInputFormat();
FileSystem fs = FileSystem.getLocal(job);
for(int numFiles = 1; numFiles< MAX_NUM_FILES ;
numFiles+= (NUM_FILES_INCR / 2) + rand.nextInt(NUM_FILES_INCR / 2)) {
Path dir = initFiles(fs, numFiles, -1);
BitSet bits = new BitSet(numFiles);
for(int i=1;i< MAX_SPLIT_COUNT ;i+= rand.nextInt(SPLIT_COUNT_INCR) + 1) {
LOG.info("Running for Num Files=" + numFiles + ", split count=" + i);
MultiFileSplit[] splits = (MultiFileSplit[])format.getSplits(job, i);
bits.clear();
for(MultiFileSplit split : splits) {
long splitLength = 0;
for(Path p : split.getPaths()) {
long length = fs.getContentSummary(p).getLength();
assertEquals(length, lengths.get(p.getName()).longValue());
splitLength += length;
String name = p.getName();
int index = Integer.parseInt(
name.substring(name.lastIndexOf("file_") + 5));
assertFalse(bits.get(index));
bits.set(index);
}
assertEquals(splitLength, split.getLength());
}
}
assertEquals(bits.cardinality(), numFiles);
fs.delete(dir, true);
}
LOG.info("Test Finished");
}
@Test
public void testFormatWithLessPathsThanSplits() throws Exception {
MultiFileInputFormat<Text,Text> format = new DummyMultiFileInputFormat();
FileSystem fs = FileSystem.getLocal(job);
// Test with no path
initFiles(fs, 0, -1);
assertEquals(0, format.getSplits(job, 2).length);
// Test with 2 path and 4 splits
initFiles(fs, 2, 500);
assertEquals(2, format.getSplits(job, 4).length);
}
}
| DummyMultiFileInputFormat |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/slot/TaskSlotPayload.java | {
"start": 1184,
"end": 1589
} | interface ____ {
JobID getJobID();
ExecutionAttemptID getExecutionId();
AllocationID getAllocationId();
CompletableFuture<?> getTerminationFuture();
/**
* Fail the payload with the given throwable. This operation should eventually complete the
* termination future.
*
* @param cause of the failure
*/
void failExternally(Throwable cause);
}
| TaskSlotPayload |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/remote/request/SetupAckRequest.java | {
"start": 895,
"end": 1457
} | class ____ extends ServerRequest {
private Map<String, Boolean> abilityTable;
public SetupAckRequest() {
}
public SetupAckRequest(Map<String, Boolean> abilityTable) {
this.abilityTable = abilityTable;
}
public Map<String, Boolean> getAbilityTable() {
return abilityTable;
}
public void setAbilityTable(Map<String, Boolean> abilityTable) {
this.abilityTable = abilityTable;
}
@Override
public String getModule() {
return INTERNAL_MODULE;
}
}
| SetupAckRequest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/codec/CodecTests.java | {
"start": 2033,
"end": 7310
} | class ____ extends ESTestCase {
public void testResolveDefaultCodecs() throws Exception {
assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG);
CodecService codecService = createCodecService();
assertThat(codecService.codec("default"), instanceOf(PerFieldMapperCodec.class));
assertThat(codecService.codec("default"), instanceOf(Elasticsearch92Lucene103Codec.class));
}
public void testDefault() throws Exception {
assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG);
Codec codec = createCodecService().codec("default");
assertEquals(
"Zstd814StoredFieldsFormat(compressionMode=ZSTD(level=1), chunkSize=14336, maxDocsPerChunk=128, blockShift=10)",
codec.storedFieldsFormat().toString()
);
}
public void testBestCompression() throws Exception {
Codec codec = createCodecService().codec("best_compression");
assertEquals(
"Zstd814StoredFieldsFormat(compressionMode=ZSTD(level=3), chunkSize=245760, maxDocsPerChunk=2048, blockShift=10)",
codec.storedFieldsFormat().toString()
);
}
public void testLegacyDefault() throws Exception {
Codec codec = createCodecService().codec("legacy_default");
assertThat(codec.storedFieldsFormat(), Matchers.instanceOf(Lucene90StoredFieldsFormat.class));
// Make sure the legacy codec is writable
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setCodec(codec))) {
Document doc = new Document();
doc.add(new KeywordField("string_field", "abc", Field.Store.YES));
doc.add(new IntField("int_field", 42, Field.Store.YES));
w.addDocument(doc);
try (DirectoryReader r = DirectoryReader.open(w)) {}
}
}
public void testLegacyBestCompression() throws Exception {
Codec codec = createCodecService().codec("legacy_best_compression");
assertThat(codec.storedFieldsFormat(), Matchers.instanceOf(Lucene90StoredFieldsFormat.class));
// Make sure the legacy codec is writable
try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setCodec(codec))) {
Document doc = new Document();
doc.add(new KeywordField("string_field", "abc", Field.Store.YES));
doc.add(new IntField("int_field", 42, Field.Store.YES));
w.addDocument(doc);
try (DirectoryReader r = DirectoryReader.open(w)) {}
}
}
public void testCodecRetrievalForUnknownCodec() throws Exception {
CodecService codecService = createCodecService();
IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> codecService.codec("unknown_codec"));
assertEquals("failed to find codec [unknown_codec]", exception.getMessage());
}
public void testAvailableCodecsContainsExpectedCodecs() throws Exception {
CodecService codecService = createCodecService();
String[] availableCodecs = codecService.availableCodecs();
List<String> codecList = Arrays.asList(availableCodecs);
int expectedCodecCount = Codec.availableCodecs().size() + 5;
assertTrue(codecList.contains(CodecService.DEFAULT_CODEC));
assertTrue(codecList.contains(CodecService.LEGACY_DEFAULT_CODEC));
assertTrue(codecList.contains(CodecService.BEST_COMPRESSION_CODEC));
assertTrue(codecList.contains(CodecService.LEGACY_BEST_COMPRESSION_CODEC));
assertTrue(codecList.contains(CodecService.LUCENE_DEFAULT_CODEC));
assertFalse(codecList.contains("unknown_codec"));
assertEquals(expectedCodecCount, availableCodecs.length);
}
private CodecService createCodecService() throws IOException {
Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
IndexSettings settings = IndexSettingsModule.newIndexSettings("_na", nodeSettings);
SimilarityService similarityService = new SimilarityService(settings, null, Collections.emptyMap());
IndexAnalyzers indexAnalyzers = createTestAnalysis(settings, nodeSettings).indexAnalyzers;
MapperRegistry mapperRegistry = new MapperRegistry(
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap(),
MapperPlugin.NOOP_FIELD_FILTER,
null
);
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP);
MapperService service = new MapperService(
() -> TransportVersion.current(),
settings,
indexAnalyzers,
parserConfig(),
similarityService,
mapperRegistry,
() -> null,
settings.getMode().idFieldMapperWithoutFieldData(),
ScriptCompiler.NONE,
bitsetFilterCache::getBitSetProducer,
MapperMetrics.NOOP
);
return new CodecService(service, BigArrays.NON_RECYCLING_INSTANCE);
}
}
| CodecTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/EventHubsEndpointBuilderFactory.java | {
"start": 47420,
"end": 49911
} | interface ____ {
/**
* Azure Event Hubs (camel-azure-eventhubs)
* Send and receive events to/from Azure Event Hubs using AMQP protocol.
*
* Category: cloud,messaging
* Since: 3.5
* Maven coordinates: org.apache.camel:camel-azure-eventhubs
*
* @return the dsl builder for the headers' name.
*/
default EventHubsHeaderNameBuilder azureEventhubs() {
return EventHubsHeaderNameBuilder.INSTANCE;
}
/**
* Azure Event Hubs (camel-azure-eventhubs)
* Send and receive events to/from Azure Event Hubs using AMQP protocol.
*
* Category: cloud,messaging
* Since: 3.5
* Maven coordinates: org.apache.camel:camel-azure-eventhubs
*
* Syntax: <code>azure-eventhubs:namespace/eventHubName</code>
*
* Path parameter: namespace
* EventHubs namespace created in Azure Portal.
*
* Path parameter: eventHubName
* EventHubs name under a specific namespace.
*
* @param path namespace/eventHubName
* @return the dsl builder
*/
default EventHubsEndpointBuilder azureEventhubs(String path) {
return EventHubsEndpointBuilderFactory.endpointBuilder("azure-eventhubs", path);
}
/**
* Azure Event Hubs (camel-azure-eventhubs)
* Send and receive events to/from Azure Event Hubs using AMQP protocol.
*
* Category: cloud,messaging
* Since: 3.5
* Maven coordinates: org.apache.camel:camel-azure-eventhubs
*
* Syntax: <code>azure-eventhubs:namespace/eventHubName</code>
*
* Path parameter: namespace
* EventHubs namespace created in Azure Portal.
*
* Path parameter: eventHubName
* EventHubs name under a specific namespace.
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path namespace/eventHubName
* @return the dsl builder
*/
default EventHubsEndpointBuilder azureEventhubs(String componentName, String path) {
return EventHubsEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Azure Event Hubs component.
*/
public static | EventHubsBuilders |
java | quarkusio__quarkus | independent-projects/tools/registry-client/src/main/java/io/quarkus/registry/config/RegistryDescriptorConfigImpl.java | {
"start": 699,
"end": 1884
} | class ____ implements RegistryDescriptorConfig {
private final ArtifactCoords artifact;
@JsonIgnore
private final boolean generated;
// Package private. Used when filling in defaults (that shouldn't be persisted), too
RegistryDescriptorConfigImpl(ArtifactCoords artifact, boolean generated) {
this.artifact = artifact;
this.generated = generated;
}
@Override
public ArtifactCoords getArtifact() {
return artifact;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (!(o instanceof RegistryDescriptorConfig))
return false;
RegistryDescriptorConfig that = (RegistryDescriptorConfig) o;
return Objects.equals(artifact, that.getArtifact());
}
@Override
public int hashCode() {
return Objects.hash(artifact);
}
@Override
public String toString() {
return this.getClass().getSimpleName() +
"{artifact=" + artifact +
'}';
}
/**
* Builder.
* {@literal set*} methods are used for deserialization
*/
public static | RegistryDescriptorConfigImpl |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/AppendableJoinerTest.java | {
"start": 1455,
"end": 5648
} | class ____ {
private final String name;
Fixture(final String name) {
this.name = name;
}
/**
* Renders myself onto an Appendable to avoid creating intermediary strings.
*/
void render(final Appendable appendable) throws IOException {
appendable.append(name);
appendable.append('!');
}
}
@Test
void testAllBuilderPropertiesStringBuilder() {
// @formatter:off
final AppendableJoiner<Object> joiner = AppendableJoiner.builder()
.setPrefix("<")
.setDelimiter(".")
.setSuffix(">")
.setElementAppender((a, e) -> a.append(String.valueOf(e)))
.get();
// @formatter:on
final StringBuilder sbuilder = new StringBuilder("A");
assertEquals("A<B.C>", joiner.join(sbuilder, "B", "C").toString());
sbuilder.append("1");
assertEquals("A<B.C>1<D.E>", joiner.join(sbuilder, Arrays.asList("D", "E")).toString());
}
@Test
void testBuildDefaultStringBuilder() {
final Builder<Object> builder = AppendableJoiner.builder();
assertNotSame(builder.get(), builder.get());
final AppendableJoiner<Object> joiner = builder.get();
final StringBuilder sbuilder = new StringBuilder("A");
assertEquals("ABC", joiner.join(sbuilder, "B", "C").toString());
sbuilder.append("1");
assertEquals("ABC1DE", joiner.join(sbuilder, "D", "E").toString());
}
@Test
void testBuilder() {
assertNotSame(AppendableJoiner.builder(), AppendableJoiner.builder());
}
@SuppressWarnings("deprecation") // Test own StrBuilder
@ParameterizedTest
@ValueSource(classes = { StringBuilder.class, StringBuffer.class, StringWriter.class, StrBuilder.class, TextStringBuilder.class })
void testDelimiterAppendable(final Class<? extends Appendable> clazz) throws Exception {
final AppendableJoiner<Object> joiner = AppendableJoiner.builder().setDelimiter(".").get();
final Appendable sbuilder = clazz.newInstance();
sbuilder.append("A");
// throws IOException
assertEquals("AB.C", joiner.joinA(sbuilder, "B", "C").toString());
sbuilder.append("1");
// throws IOException
assertEquals("AB.C1D.E", joiner.joinA(sbuilder, Arrays.asList("D", "E")).toString());
}
@Test
void testDelimiterStringBuilder() {
final AppendableJoiner<Object> joiner = AppendableJoiner.builder().setDelimiter(".").get();
final StringBuilder sbuilder = new StringBuilder("A");
// does not throw IOException
assertEquals("AB.C", joiner.join(sbuilder, "B", "C").toString());
sbuilder.append("1");
// does not throw IOException
assertEquals("AB.C1D.E", joiner.join(sbuilder, Arrays.asList("D", "E")).toString());
}
@Test
void testToCharSequenceStringBuilder1() {
// @formatter:off
final AppendableJoiner<Object> joiner = AppendableJoiner.builder()
.setPrefix("<")
.setDelimiter(".")
.setSuffix(">")
.setElementAppender((a, e) -> a.append("|").append(Objects.toString(e)))
.get();
// @formatter:on
final StringBuilder sbuilder = new StringBuilder("A");
assertEquals("A<|B.|C>", joiner.join(sbuilder, "B", "C").toString());
sbuilder.append("1");
assertEquals("A<|B.|C>1<|D.|E>", joiner.join(sbuilder, Arrays.asList("D", "E")).toString());
}
@Test
void testToCharSequenceStringBuilder2() {
// @formatter:off
final AppendableJoiner<Fixture> joiner = AppendableJoiner.<Fixture>builder()
.setElementAppender((a, e) -> e.render(a))
.get();
// @formatter:on
final StringBuilder sbuilder = new StringBuilder("[");
assertEquals("[B!C!", joiner.join(sbuilder, new Fixture("B"), new Fixture("C")).toString());
sbuilder.append("]");
assertEquals("[B!C!]D!E!", joiner.join(sbuilder, Arrays.asList(new Fixture("D"), new Fixture("E"))).toString());
}
}
| Fixture |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/api/operators/StreamFlatMap.java | {
"start": 1135,
"end": 1836
} | class ____<IN, OUT> extends AbstractUdfStreamOperator<OUT, FlatMapFunction<IN, OUT>>
implements OneInputStreamOperator<IN, OUT> {
private static final long serialVersionUID = 1L;
private transient TimestampedCollector<OUT> collector;
public StreamFlatMap(FlatMapFunction<IN, OUT> flatMapper) {
super(flatMapper);
}
@Override
public void open() throws Exception {
super.open();
collector = new TimestampedCollector<>(output);
}
@Override
public void processElement(StreamRecord<IN> element) throws Exception {
collector.setTimestamp(element);
userFunction.flatMap(element.getValue(), collector);
}
}
| StreamFlatMap |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ModifyingCollectionWithItselfTest.java | {
"start": 884,
"end": 1498
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(ModifyingCollectionWithItself.class, getClass());
@Test
public void positiveCases1() {
compilationHelper
.addSourceLines(
"ModifyingCollectionWithItselfPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.util.ArrayList;
import java.util.List;
/**
* @author scottjohnson@google.com (Scott Johnson)
*/
public | ModifyingCollectionWithItselfTest |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RedissonReactiveClient.java | {
"start": 858,
"end": 1027
} | interface ____ access
* to all redisson objects with Reactive interface.
*
* @see RedissonRxClient
* @see RedissonClient
*
* @author Nikita Koksharov
*
*/
public | for |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxMergeSequentialTest.java | {
"start": 1932,
"end": 27972
} | class ____ {
AssertSubscriber<Object> ts;
AssertSubscriber<Object> tsBp;
final Function<Integer, Flux<Integer>> toJust = Flux::just;
final Function<Integer, Flux<Integer>> toRange = t -> Flux.range(t, 2);
@BeforeEach
public void before() {
ts = new AssertSubscriber<>();
tsBp = new AssertSubscriber<>(0L);
}
@Test
public void normal() {
StepVerifier.create(Flux.range(1, 5)
.hide()
.flatMapSequential(t -> Flux.range(t, 2)))
.expectNoFusionSupport()
.expectNext(1, 2, 2, 3, 3, 4, 4, 5, 5, 6)
.verifyComplete();
}
@Test
public void normalBackpressured() {
AssertSubscriber<Integer> ts = Flux.range(1, 5)
.hide()
.flatMapSequential(t -> Flux.range(t, 2))
.subscribeWith(AssertSubscriber.create(3));
ts.assertValues(1, 2, 2);
ts.request(1);
ts.assertValues(1, 2, 2, 3);
ts.request(1);
ts.assertValues(1, 2, 2, 3, 3);
ts.request(5);
ts.assertComplete().assertValues(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
}
@Test
public void normalDelayEnd() {
Flux.range(1, 5)
.flatMapSequentialDelayError(t -> Flux.range(t, 2), 32, 32)
.subscribeWith(AssertSubscriber.create())
.assertComplete().assertValues(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
}
@Test
public void normalDelayEndBackpressured() {
AssertSubscriber<Integer> ts = Flux.range(1, 5)
.flatMapSequentialDelayError(t -> Flux.range(t, 2), 32, 32)
.subscribeWith(AssertSubscriber.create(3));
ts.assertValues(1, 2, 2);
ts.request(1);
ts.assertValues(1, 2, 2, 3);
ts.request(1);
ts.assertValues(1, 2, 2, 3, 3);
ts.request(5);
ts.assertComplete().assertValues(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
}
@Test
public void mainErrorsDelayEnd() {
Sinks.Many<Integer> main = Sinks.unsafe().many().multicast().directBestEffort();
final Sinks.Many<Integer> inner = Sinks.unsafe().many().multicast().directBestEffort();
AssertSubscriber<Integer> ts = main.asFlux()
.flatMapSequentialDelayError(t -> inner.asFlux(), 32, 32)
.subscribeWith(AssertSubscriber.create());
main.emitNext(1, FAIL_FAST);
main.emitNext(2, FAIL_FAST);
inner.emitNext(2, FAIL_FAST);
ts.assertValues(2);
main.emitError(new RuntimeException("Forced failure"), FAIL_FAST);
ts.assertNoError();
inner.emitNext(3, FAIL_FAST);
inner.emitComplete(FAIL_FAST);
ts.assertValues(2, 3, 2, 3)
.assertErrorMessage("Forced failure");
}
@Test
public void mainErrorsImmediate() {
Sinks.Many<Integer> main = Sinks.unsafe().many().multicast().directBestEffort();
final Sinks.Many<Integer> inner = Sinks.unsafe().many().multicast().directBestEffort();
AssertSubscriber<Integer> ts = main.asFlux().flatMapSequential(t -> inner.asFlux())
.subscribeWith(AssertSubscriber.create());
main.emitNext(1, FAIL_FAST);
main.emitNext(2, FAIL_FAST);
inner.emitNext(2, FAIL_FAST);
ts.assertValues(2);
main.emitError(new RuntimeException("Forced failure"), FAIL_FAST);
assertThat(inner.currentSubscriberCount()).as("inner has subscriber").isZero();
inner.emitNext(3, FAIL_FAST);
inner.emitComplete(FAIL_FAST);
ts.assertValues(2).assertErrorMessage("Forced failure");
}
@Test
public void longEager() {
Flux.range(1, 2 * Queues.SMALL_BUFFER_SIZE)
.flatMapSequential(v -> Flux.just(1))
.subscribeWith(AssertSubscriber.create())
.assertValueCount(2 * Queues.SMALL_BUFFER_SIZE)
.assertNoError()
.assertComplete();
}
@Test
public void testSimple() {
Flux.range(1, 100).flatMapSequential(toJust).subscribe(ts);
ts.assertNoError();
ts.assertValueCount(100);
ts.assertComplete();
}
@Test
public void testSimple2() {
Flux.range(1, 100).flatMapSequential(toRange).subscribe(ts);
ts.assertNoError();
ts.assertValueCount(200);
ts.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness2() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source).subscribe(tsBp);
assertThat(count).hasValue(2);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness3() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source).subscribe(tsBp);
assertThat(count).hasValue(3);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness4() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source, source).subscribe(tsBp);
assertThat(count).hasValue(4);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness5() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source, source, source).subscribe(tsBp);
assertThat(count).hasValue(5);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness6() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source, source, source, source).subscribe(tsBp);
assertThat(count).hasValue(6);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness7() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source, source, source, source, source).subscribe(tsBp);
assertThat(count).hasValue(7);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness8() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source, source, source, source, source, source).subscribe(tsBp);
assertThat(count).hasValue(8);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness9() {
final AtomicInteger count = new AtomicInteger();
Flux<Integer> source = Flux.just(1).doOnNext(t -> count.getAndIncrement()).hide();
Flux.mergeSequential(source, source, source, source, source, source, source, source, source).subscribe(tsBp);
assertThat(count).hasValue(9);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.assertNoValues();
tsBp.request(Long.MAX_VALUE);
tsBp.assertValueCount(count.get());
tsBp.assertNoError();
tsBp.assertComplete();
}
@Test
public void testMainError() {
Flux.<Integer>error(new RuntimeException()).flatMapSequential(toJust).subscribe(ts);
ts.assertNoValues();
ts.assertError(RuntimeException.class);
ts.assertNotComplete();
}
@Test
public void testInnerErrorWithDroppedError() {
final AtomicInteger count = new AtomicInteger();
Flux.range(0, 3)
.flatMapSequential(i -> Mono.defer(() -> {
throw new RuntimeException("forced failure");
}))
.onErrorContinue((t, v) -> {
if (t.getMessage().contains("forced failure")) {
count.incrementAndGet();
}
})
.subscribe();
Assertions.assertEquals(3, count.get());
}
@SuppressWarnings("unchecked")
@Test
public void testInnerError() {
Flux.mergeSequential(Flux.just(1), Flux.error(new RuntimeException())).subscribe(ts);
ts.assertValues(1);
ts.assertError(RuntimeException.class);
ts.assertNotComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testInnerEmpty() {
Flux.mergeSequential(Flux.empty(), Flux.empty()).subscribe(ts);
ts.assertNoValues();
ts.assertNoError();
ts.assertComplete();
}
@Test
public void testMapperThrows() {
Flux.just(1).flatMapSequential(t -> { throw new RuntimeException(); }).subscribe(ts);
ts.assertNoValues();
ts.assertNotComplete();
ts.assertError(RuntimeException.class);
}
@Test
public void testInvalidCapacityHint() {
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> {
Flux.just(1).flatMapSequential(toJust, 0, Queues.SMALL_BUFFER_SIZE);
});
}
@Test
public void testInvalidMaxConcurrent() {
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> {
Flux.just(1).flatMapSequential(toJust, Queues.SMALL_BUFFER_SIZE, 0);
});
}
@Test
@SuppressWarnings("unchecked")
public void testBackpressure() {
Flux.mergeSequential(Flux.just(1), Flux.just(1)).subscribe(tsBp);
tsBp.assertNoError();
tsBp.assertNoValues();
tsBp.assertNotComplete();
tsBp.request(1);
tsBp.assertValues(1);
tsBp.assertNoError();
tsBp.assertNotComplete();
tsBp.request(1);
tsBp.assertValues(1, 1);
tsBp.assertNoError();
tsBp.assertComplete();
}
@Test
public void testAsynchronousRun() {
Flux.range(1, 2).flatMapSequential(t -> Flux.range(1, 1000)
.subscribeOn(Schedulers.single())
).publishOn(Schedulers.boundedElastic()).subscribe(ts);
ts.await(Duration.ofSeconds(5));
ts.assertNoError();
ts.assertValueCount(2000);
}
@Test
public void testReentrantWork() {
final Sinks.Many<Integer> subject = Sinks.unsafe().many().multicast().directBestEffort();
final AtomicBoolean once = new AtomicBoolean();
subject.asFlux()
.flatMapSequential(Flux::just)
.doOnNext(t -> {
if (once.compareAndSet(false, true)) {
subject.emitNext(2, FAIL_FAST);
}
})
.subscribe(ts);
subject.emitNext(1, FAIL_FAST);
ts.assertNoError();
ts.assertNotComplete();
ts.assertValues(1, 2);
}
@Test
public void testPrefetchIsBounded() {
final AtomicInteger count = new AtomicInteger();
AssertSubscriber<Object> ts = AssertSubscriber.create(0);
Flux.just(1).hide()
.flatMapSequential(t -> Flux.range(1, Queues.SMALL_BUFFER_SIZE * 2)
.doOnNext(t1 -> count.getAndIncrement())
.hide())
.subscribe(ts);
ts.assertNoError();
ts.assertNoValues();
ts.assertNotComplete();
assertThat(count).hasValue(Queues.XS_BUFFER_SIZE);
}
@Test
public void testMaxConcurrent5() {
final List<Long> requests = new ArrayList<>();
Flux.range(1, 100).doOnRequest(requests::add)
.flatMapSequential(toJust, 5, Queues.SMALL_BUFFER_SIZE)
.subscribe(ts);
ts.assertNoError();
ts.assertValueCount(100);
ts.assertComplete();
assertThat((long) requests.get(0)).isEqualTo(5);
assertThat((long) requests.get(1)).isEqualTo(1);
assertThat((long) requests.get(2)).isEqualTo(1);
assertThat((long) requests.get(3)).isEqualTo(1);
assertThat((long) requests.get(4)).isEqualTo(1);
assertThat((long) requests.get(5)).isEqualTo(1);
}
@SuppressWarnings("unchecked")
@Test
public void maxConcurrencyAndPrefetch() {
Flux<Integer> source = Flux.just(1);
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.mergeSequential(Arrays.asList(source, source, source), 1, 1)
.subscribe(ts);
ts.assertValues(1, 1, 1);
ts.assertNoError();
ts.assertComplete();
}
@Test
public void mergeSequentialPublisher() {
Flux<Integer> source = Flux.just(1);
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.mergeSequential(Flux.just(source, source, source)).subscribe(ts);
ts.assertValues(1, 1, 1);
ts.assertNoError();
ts.assertComplete();
}
@Test
public void mergeSequentialMaxConcurrencyAndPrefetch() {
Flux<Integer> source = Flux.just(1);
AssertSubscriber<Integer> ts = AssertSubscriber.create();
Flux.mergeSequential(Flux.just(source, source, source), 1, 1)
.subscribe(ts);
ts.assertValues(1, 1, 1);
ts.assertNoError();
ts.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void badPrefetch() throws Exception {
Flux<Integer> source = Flux.just(1);
try {
Flux.mergeSequential(Arrays.asList(source, source, source), 1, -99);
} catch (IllegalArgumentException ex) {
assertThat(ex).hasMessage("prefetch > 0 required but it was -99");
}
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void mappingBadPrefetch() throws Exception {
Flux<Integer> source = Flux.just(1);
try {
Flux.just(source, source, source).flatMapSequential(Flux.identityFunction(), 10, -99);
} catch (IllegalArgumentException ex) {
assertThat(ex).hasMessage("prefetch > 0 required but it was -99");
}
}
@Test
public void mergeSequentialZero() {
Flux.mergeSequential(Collections.<Flux<Integer>>emptyList())
.subscribeWith(AssertSubscriber.create())
.assertComplete().assertValues();
}
@SuppressWarnings("unchecked")
@Test
public void mergeSequentialOne() {
Flux.mergeSequential(Arrays.asList(Flux.just(1)))
.subscribeWith(AssertSubscriber.create())
.assertComplete().assertValues(1);
}
@Test
public void mergeSequentialTwo() {
Flux.mergeSequential(Arrays.asList(Flux.just(1), Flux.just(2)))
.subscribeWith(AssertSubscriber.create())
.assertComplete().assertValues(1, 2);
}
@Test
public void mergeSequentialTwoPrefetch() {
StepVerifier.create(Flux.mergeSequential(128,
Flux.just(1).concatWith(Flux.error(new Exception("test"))),
Flux.just(2)))
.expectNext(1)
.verifyErrorMessage("test");
}
@Test
public void mergeSequentialTwoDelayError() {
StepVerifier.create(Flux.mergeSequentialDelayError(128,
Flux.just(1).concatWith(Flux.error(new Exception("test"))),
Flux.just(2)))
.expectNext(1, 2)
.verifyErrorMessage("test");
}
@SuppressWarnings("unchecked")
@Test
public void mergeSequentialIterable() {
Flux.mergeSequential(Arrays.asList(Flux.just(1), Flux.just(2)))
.subscribeWith(AssertSubscriber.create())
.assertComplete().assertValues(1, 2);
}
@Test
public void mergeSequentialTwoDelayIterableError() {
StepVerifier.create(Flux.mergeSequentialDelayError(
Arrays.asList(Flux.just(1).concatWith(Flux.error(new Exception("test"))),
Flux.just(2)), 128, 128))
.expectNext(1, 2)
.verifyErrorMessage("test");
}
@SuppressWarnings("unchecked")
@Test
public void mergeSequentialPublisher2() {
Flux.mergeSequential(Flux.just(Flux.just(1), Flux.just(2)))
.subscribeWith(AssertSubscriber.create())
.assertComplete().assertValues(1, 2);
}
@Test
public void mergeSequentialTwoDelayPublisherError() {
StepVerifier.create(Flux.mergeSequentialDelayError(
Flux.just(Flux.just(1).concatWith(Flux.error(new Exception("test"))),
Flux.just(2)), 128, 128))
.expectNext(1, 2)
.verifyErrorMessage("test");
}
	@Test
	public void mergeSequentialLargeUnorderedEach100() {
		// Delay each value so that, within every pack of 100, later indexes
		// complete sooner; flatMapSequential must still deliver results in
		// subscription order despite out-of-order completion.
		Scheduler scheduler = Schedulers.boundedElastic();
		AtomicBoolean comparisonFailure = new AtomicBoolean();
		long count = Flux.range(0, 500)
		                 .flatMapSequential(i -> {
			                 //ensure each pack of 100 is delayed in inverse order
			                 Duration sleep = Duration.ofMillis(600 - i % 100);
			                 return Mono.delay(sleep)
			                            .then(Mono.just(i))
			                            .subscribeOn(scheduler);
		                 })
		                 // zip with a strictly increasing index so any reordering
		                 // shows up as a T1/T2 mismatch
		                 .zipWith(Flux.range(0, Integer.MAX_VALUE))
		                 .doOnNext(i -> {
			                 if (!Objects.equals(i.getT1(), i.getT2())) {
//				                 System.out.println(i);
				                 comparisonFailure.set(true);
			                 }
		                 })
		                 .count().block();
		assertThat(count).isEqualTo(500L);
		assertThat(comparisonFailure.get()).isFalse();
	}
	@Test
	public void mergeSequentialLargeBadQueueSize() {
		// Force a subscriber queue smaller than the concurrency level: the
		// operator must fail fast with a descriptive IllegalStateException
		// rather than silently dropping inner subscribers.
		int prefetch = 32;
		int maxConcurrency = 256;
		Supplier<Queue<FluxMergeSequential.MergeSequentialInner<Integer>>> badQueueSupplier =
				Queues.get(Math.min(prefetch, maxConcurrency));
		FluxMergeSequential<Integer, Integer> fluxMergeSequential =
				new FluxMergeSequential<>(Flux.range(0, 500),
						Mono::just,
						maxConcurrency, prefetch, FluxConcatMap.ErrorMode.IMMEDIATE,
						badQueueSupplier);
		StepVerifier.create(fluxMergeSequential.zipWith(Flux.range(0, Integer.MAX_VALUE)))
		            .expectErrorMatches(e -> e instanceof IllegalStateException &&
				            e.getMessage().startsWith("Too many subscribers for fluxMergeSequential on item: ") &&
				            e.getMessage().endsWith("; subscribers: 32"))
		            .verify();
	}
@Test
public void mergeEmpty(){
StepVerifier.create(Flux.mergeSequential())
.verifyComplete();
}
@Test
public void mergeOne(){
StepVerifier.create(Flux.mergeSequential(Flux.just(1)))
.expectNext(1)
.verifyComplete();
}
	//see https://github.com/reactor/reactor-core/issues/936
	@Test
	public void flatMapSequentialDelayErrorWithFluxError() {
		// Regression: a failing inner Flux must not stop subsequent sources
		// from being drained; the error is emitted only after 3 and 4.
		StepVerifier.create(
				Flux.just(
						Flux.just(1, 2),
						Flux.<Integer>error(new Exception("test")),
						Flux.just(3, 4))
				    .flatMapSequentialDelayError(f -> f, 4, 4))
		            .expectNext(1, 2, 3, 4)
		            .verifyErrorMessage("test");
	}
	//see https://github.com/reactor/reactor-core/issues/936
	@Test
	public void flatMapSequentialDelayErrorWithMonoError() {
		// Same regression as above but with a Mono as the failing inner source.
		StepVerifier.create(
				Flux.just(
						Flux.just(1, 2),
						Mono.<Integer>error(new Exception("test")),
						Flux.just(3, 4))
				    .flatMapSequentialDelayError(f -> f, 4, 4))
		            .expectNext(1, 2, 3, 4)
		            .verifyErrorMessage("test");
	}
	//see https://github.com/reactor/reactor-core/issues/936
	@Test
	public void mergeSequentialDelayErrorWithFluxError() {
		// Same delayed-error expectation, exercised through the static
		// mergeSequentialDelayError entry point with a failing Flux.
		StepVerifier.create(
				Flux.mergeSequentialDelayError(
						Flux.just(
								Flux.just(1, 2),
								Flux.error(new Exception("test")),
								Flux.just(3, 4))
						, 4, 4)
		)
		            .expectNext(1, 2, 3, 4)
		            .verifyErrorMessage("test");
	}
	//see https://github.com/reactor/reactor-core/issues/936
	@Test
	public void mergeSequentialDelayErrorWithMonoError() {
		// Same delayed-error expectation, exercised through the static
		// mergeSequentialDelayError entry point with a failing Mono.
		StepVerifier.create(
				Flux.mergeSequentialDelayError(
						Flux.just(
								Flux.just(1, 2),
								Mono.error(new Exception("test")),
								Flux.just(3, 4))
						, 4, 4)
		)
		            .expectNext(1, 2, 3, 4)
		            .verifyErrorMessage("test");
	}
	@Test
	public void cancellingSequentiallyMergedMonos() {
		// Disposing the downstream subscription must propagate cancellation to
		// every merged source, including sources that never emitted.
		AtomicInteger cancelCounter = new AtomicInteger(5);
		final Flux<Object> merge = Flux.mergeSequential(
				Mono.never().doOnCancel(() -> System.out.println("Cancelling #1, remaining " + cancelCounter.decrementAndGet())),
				Mono.never().doOnCancel(() -> System.out.println("Cancelling #2, remaining " + cancelCounter.decrementAndGet())),
				Mono.never().doOnCancel(() -> System.out.println("Cancelling #3, remaining " + cancelCounter.decrementAndGet())),
				Mono.never().doOnCancel(() -> System.out.println("Cancelling #4, remaining " + cancelCounter.decrementAndGet())),
				Mono.never().doOnCancel(() -> System.out.println("Cancelling #5, remaining " + cancelCounter.decrementAndGet())));
		merge.subscribe().dispose();
		assertThat(cancelCounter).as("cancellation remaining").hasValue(0);
	}
	@Test
	public void cancellingSequentiallyFlatMappedMonos() {
		// Disposal must cancel every inner Mono created by flatMapSequential.
		AtomicInteger cancelCounter = new AtomicInteger(5);
		final Flux<Object> merge = Flux.range(1, 5)
		                               .flatMapSequential(i -> Mono.never()
				                               .doOnCancel(() -> System.out.println("Cancelling #" + i + ", remaining " + cancelCounter.decrementAndGet())));
		merge.subscribe().dispose();
		assertThat(cancelCounter).as("cancellation remaining").hasValue(0);
	}
@Test
public void scanOperator(){
Flux<Integer> parent = Flux.range(1, 5);
FluxMergeSequential<Integer, Integer> test = new FluxMergeSequential<>(parent, t -> Flux.just(t), 3, 123, ErrorMode.END);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
}
	@Test
	public void scanMain() {
		CoreSubscriber<Integer> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
		FluxMergeSequential.MergeSequentialMain<Integer, Integer> test =
				new FluxMergeSequential.MergeSequentialMain<>(actual, i -> Mono.just(i),
						5, 123, ErrorMode.BOUNDARY, Queues.unbounded());
		Subscription parent = Operators.emptySubscription();
		test.onSubscribe(parent);
		// Basic attributes reflect the constructor arguments and subscription wiring
		// (BOUNDARY error mode reports as DELAY_ERROR).
		assertThat(test.scan(Scannable.Attr.ACTUAL)).isSameAs(actual);
		assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(parent);
		assertThat(test.scan(Scannable.Attr.DELAY_ERROR)).isTrue();
		assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
		test.requested = 35;
		assertThat(test.scan(Scannable.Attr.REQUESTED_FROM_DOWNSTREAM)).isEqualTo(35);
		assertThat(test.scan(Scannable.Attr.PREFETCH)).isEqualTo(5);
		// BUFFERED counts the queued inner subscribers.
		test.subscribers.add(new FluxMergeSequential.MergeSequentialInner<>(test, 123));
		assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(1);
		assertThat(test.scan(Scannable.Attr.ERROR)).isNull();
		assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
		// After onError the main reports terminated, exposes the error, and its
		// buffered/inners state is drained.
		test.onError(new IllegalStateException("boom"));
		assertThat(test.scan(Scannable.Attr.ERROR)).isSameAs(test.error);
		assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
		assertThat(test.scan(Scannable.Attr.BUFFERED)).isEqualTo(0);
		assertThat(test.inners().count()).isEqualTo(0);
	}
	@Test
	public void scanInner() {
		CoreSubscriber<Integer> actual = new LambdaSubscriber<>(null, e -> {}, null, null);
		FluxMergeSequential.MergeSequentialMain<Integer, Integer> main =
				new FluxMergeSequential.MergeSequentialMain<Integer, Integer>(actual, i -> Mono.just(i),
						5, 123, ErrorMode.IMMEDIATE, Queues.unbounded());
		FluxMergeSequential.MergeSequentialInner<Integer> inner =
				new FluxMergeSequential.MergeSequentialInner<>(main, 123);
		Subscription parent = Operators.emptySubscription();
		inner.onSubscribe(parent);
		// The inner reports its owning main as ACTUAL and the upstream as PARENT.
		assertThat(inner.scan(Scannable.Attr.ACTUAL)).isSameAs(main);
		assertThat(inner.scan(Scannable.Attr.PARENT)).isSameAs(parent);
		assertThat(inner.scan(Scannable.Attr.PREFETCH)).isEqualTo(123);
		assertThat(inner.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.SYNC);
		// BUFFERED reflects the inner queue's size.
		inner.queue = new ConcurrentLinkedQueue<>();
		inner.queue.add(1);
		assertThat(inner.scan(Scannable.Attr.BUFFERED)).isEqualTo(1);
		assertThat(inner.scan(Scannable.Attr.ERROR)).isNull();
		assertThat(inner.scan(Scannable.Attr.TERMINATED)).isFalse();
		// setDone with an empty queue flips TERMINATED; cancel flips CANCELLED.
		inner.queue.clear();
		inner.setDone();
		assertThat(inner.scan(Scannable.Attr.TERMINATED)).isTrue();
		assertThat(inner.scan(Scannable.Attr.CANCELLED)).isFalse();
		inner.cancel();
		assertThat(inner.scan(Scannable.Attr.CANCELLED)).isTrue();
		assertThat(main.scan(Scannable.Attr.BUFFERED)).isEqualTo(0);
	}
	@Test
	void discardsPrefetched() {
		// Values prefetched but never delivered must be released through the
		// discard/drop hooks when the subscriber cancels, so tracked resources
		// do not leak.
		Hooks.onNextDropped(MemoryUtils.Tracked::safeRelease);
		Hooks.onErrorDropped(e -> {});
		Hooks.onOperatorError((e, v) -> null);
		AssertSubscriber<MemoryUtils.Tracked> assertSubscriber = new AssertSubscriber<>(
				Operators.enableOnDiscard(null, MemoryUtils.Tracked::safeRelease), 0
		);
		AtomicInteger prefetched = new AtomicInteger();
		MemoryUtils.Tracked tracked1 = new MemoryUtils.Tracked("1", false);
		MemoryUtils.Tracked tracked2 = new MemoryUtils.Tracked("2", false);
		Flux<MemoryUtils.Tracked> flux = Flux
				.just(tracked1, tracked2)
				.map(Collections::singletonList)
				.hide()
				.doOnNext(t -> prefetched.incrementAndGet())
				.flatMapIterable(Function.identity());
//				.flatMapSequential(Mono::just);
		// Subscribe with zero demand, then cancel: both values were prefetched
		// (counter == 2) but none delivered, and both must have been released.
		flux.subscribe(assertSubscriber);
		assertSubscriber.cancel();
		assertThat(assertSubscriber.values()).isEmpty();
		assertThat(prefetched.get()).isEqualTo(2);
		assertThat(tracked1.isReleased()).isTrue();
		assertThat(tracked2.isReleased()).isTrue();
		Hooks.resetOnNextDropped();
		Hooks.resetOnErrorDropped();
		Hooks.resetOnNextError();
		Hooks.resetOnOperatorError();
	}
}
| FluxMergeSequentialTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/support/moneta/MoneyNumberTest.java | {
"start": 277,
"end": 2240
} | class ____ extends TestCase {
    /**
     * Round-trips a {@code Money} value through JSON for each numeric type the
     * underlying {@code NumberValue} may wrap, asserting the number survives
     * serialization/deserialization unchanged.
     */
    public void test_for_issue() throws Exception {
        // Integer
        Money money = Money.of(5000, Monetary.getCurrency("EUR"));
        String moneyJSON = JSON.toJSONString(money);
        Money moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals(5000, moneyBack.getNumber().intValue());
        // Long
        money = Money.of(1000L, Monetary.getCurrency("EUR"));
        moneyJSON = JSON.toJSONString(money);
        moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals(1000, moneyBack.getNumber().longValue());
        // Byte (0x4a == 74)
        money = Money.of(0x4a, Monetary.getCurrency("EUR"));
        moneyJSON = JSON.toJSONString(money);
        moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals(74, moneyBack.getNumber().intValue());
        // double
        money = Money.of(new Double(1.12), Monetary.getCurrency("EUR"));
        moneyJSON = JSON.toJSONString(money);
        moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals(1.12d, moneyBack.getNumber().doubleValue());
        // float
        money = Money.of(new Float("2.01"), Monetary.getCurrency("EUR"));
        moneyJSON = JSON.toJSONString(money);
        moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals(2.01f, moneyBack.getNumber().floatValue());
        // short
        money = Money.of(new Short("2"), Monetary.getCurrency("EUR"));
        moneyJSON = JSON.toJSONString(money);
        moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals(2, moneyBack.getNumber().shortValue());
        // BigDecimal wider than a long (the original comment said "BigInteger",
        // but the code constructs a BigDecimal); compared via string form.
        money = Money.of(new BigDecimal("999999999999999999999"), Monetary.getCurrency("EUR"));
        moneyJSON = JSON.toJSONString(money);
        moneyBack = JSON.parseObject(moneyJSON, Money.class);
        assertEquals("999999999999999999999", moneyBack.getNumber().toString());
    }
}
| MoneyNumberTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java | {
"start": 1832,
"end": 5432
} | class ____ extends TransportBroadcastByNodeAction<
ForceMergeRequest,
BroadcastResponse,
TransportBroadcastByNodeAction.EmptyResult,
Void> {
private final IndicesService indicesService;
private final ThreadPool threadPool;
private final ProjectResolver projectResolver;
    /**
     * Wires up the broadcast-by-node machinery. Coordination work runs on the
     * MANAGEMENT executor; the actual merge work is dispatched to the
     * FORCE_MERGE pool in {@code shardOperation}.
     */
    @Inject
    public TransportForceMergeAction(
        ClusterService clusterService,
        TransportService transportService,
        IndicesService indicesService,
        ActionFilters actionFilters,
        ProjectResolver projectResolver,
        IndexNameExpressionResolver indexNameExpressionResolver
    ) {
        super(
            ForceMergeAction.NAME,
            clusterService,
            transportService,
            actionFilters,
            indexNameExpressionResolver,
            ForceMergeRequest::new,
            transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT) // just for coordination work
        );
        this.indicesService = indicesService;
        this.threadPool = transportService.getThreadPool();
        this.projectResolver = projectResolver;
    }
    @Override
    protected EmptyResult readShardResult(StreamInput in) throws IOException {
        // Per-shard results carry no payload, so nothing is read from the wire.
        return EmptyResult.INSTANCE;
    }
    @Override
    protected ResponseFactory<BroadcastResponse, EmptyResult> getResponseFactory(ForceMergeRequest request, ClusterState clusterState) {
        // Shard-level results are empty; the response only aggregates the shard
        // counts and any per-shard failures (the responses list is ignored).
        return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new BroadcastResponse(
            totalShards,
            successfulShards,
            failedShards,
            shardFailures
        );
    }
    @Override
    protected ForceMergeRequest readRequestFrom(StreamInput in) throws IOException {
        // Deserialize the node-level request exactly as it was written.
        return new ForceMergeRequest(in);
    }
    @Override
    protected void shardOperation(
        ForceMergeRequest request,
        ShardRouting shardRouting,
        Task task,
        Void nodeContext,
        ActionListener<EmptyResult> listener
    ) {
        assert (task instanceof CancellableTask) == false; // TODO: add cancellation handling here once the task supports it
        // Resolve the shard and wait (via ensureMutable's listener) until it may
        // be modified, then run the potentially long-running force-merge on the
        // dedicated FORCE_MERGE thread pool rather than the calling thread.
        SubscribableListener.<IndexShard>newForked(l -> {
            IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex())
                .getShard(shardRouting.shardId().id());
            indexShard.ensureMutable(l.map(unused -> indexShard), false);
        }).<EmptyResult>andThen((l, indexShard) -> {
            threadPool.executor(ThreadPool.Names.FORCE_MERGE).execute(ActionRunnable.supply(l, () -> {
                indexShard.forceMerge(request);
                return EmptyResult.INSTANCE;
            }));
        }).addListener(listener);
    }
    /**
     * The force merge request works against *all* shards of the concrete
     * indices (everything returned by {@code allShards} for the project's
     * routing table), so every shard copy gets merged.
     */
    @Override
    protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) {
        return clusterState.routingTable(projectResolver.getProjectId()).allShards(concreteIndices);
    }
    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, ForceMergeRequest request) {
        // Rejected while metadata writes are blocked cluster-wide for the project.
        return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
    }
    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, ForceMergeRequest request, String[] concreteIndices) {
        // Rejected for any target index that has a metadata-write block.
        return state.blocks().indicesBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE, concreteIndices);
    }
}
| TransportForceMergeAction |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java | {
"start": 4757,
"end": 46335
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestApplicationLimits.class);
final static int GB = 1024;
LeafQueue queue;
CSQueue root;
private final ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
RMContext rmContext = null;
private CapacitySchedulerContext csContext;
  @BeforeEach
  public void setUp() throws IOException {
    // Build a minimal capacity-scheduler fixture: root with leaf queues a/b,
    // a 10-node cluster (16GB / 32 vcores per node), and a rolled container
    // token secret manager so limit computations can run.
    CapacitySchedulerConfiguration csConf = 
        new CapacitySchedulerConfiguration();
    YarnConfiguration conf = new YarnConfiguration();
    setupQueueConfiguration(csConf);
    
    rmContext = TestUtils.getMockRMContext();
    Resource clusterResource = Resources.createResource(10 * 16 * GB, 10 * 32);

    csContext = createCSContext(csConf, resourceCalculator,
        Resources.createResource(GB, 1), Resources.createResource(16*GB, 32),
        clusterResource);
    when(csContext.getRMContext()).thenReturn(rmContext);
    CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(csContext);
    setQueueHandler(csContext);
    RMContainerTokenSecretManager containerTokenSecretManager =
        new RMContainerTokenSecretManager(conf);
    containerTokenSecretManager.rollMasterKey();
    when(csContext.getContainerTokenSecretManager()).thenReturn(
        containerTokenSecretManager);

    CSQueueStore queues = new CSQueueStore();
    root = CapacitySchedulerQueueManager
        .parseQueue(queueContext, csConf, null, "root",
            queues, queues,
            TestUtils.spyHook);
    root.updateClusterResource(clusterResource,
        new ResourceLimits(clusterResource));

    // The leaf queue under test is a spy so ACL checks and app limits can be
    // stubbed per test; it shares the real queue's resource quotas.
    queue = spy(new LeafQueue(queueContext, A, root, null));
    QueueResourceQuotas queueResourceQuotas = ((LeafQueue) queues.get(A))
        .getQueueResourceQuotas();
    doReturn(queueResourceQuotas).when(queue).getQueueResourceQuotas();

    // Stub out ACL checks
    doReturn(true).
        when(queue).hasAccess(any(QueueACL.class),
            any(UserGroupInformation.class));

    // Some default values
    doReturn(100).when(queue).getMaxApplications();
    doReturn(25).when(queue).getMaxApplicationsPerUser();
  }
private static final String A = "a";
private static final String B = "b";
private static final String C = "c";
private static final String D = "d";
private static final String AA1 = "a1";
private static final String AA2 = "a2";
private static final String AA3 = "a3";
private static final QueuePath ROOT_QUEUE_PATH =
new QueuePath(CapacitySchedulerConfiguration.ROOT);
private static final QueuePath A_QUEUE_PATH = ROOT_QUEUE_PATH.createNewLeaf(A);
private static final QueuePath B_QUEUE_PATH = ROOT_QUEUE_PATH.createNewLeaf(B);
private static final QueuePath C_QUEUE_PATH = ROOT_QUEUE_PATH.createNewLeaf(C);
private static final QueuePath D_QUEUE_PATH = ROOT_QUEUE_PATH.createNewLeaf(D);
private static final QueuePath AA1_QUEUE_PATH = A_QUEUE_PATH.createNewLeaf(AA1);
private static final QueuePath AA2_QUEUE_PATH = A_QUEUE_PATH.createNewLeaf(AA2);
private static final QueuePath AA3_QUEUE_PATH = A_QUEUE_PATH.createNewLeaf(AA3);
  private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
    // Define top-level queues: a gets 10% capacity with a 50% user limit and
    // a user-limit factor of 5; b gets the remaining 90%.
    conf.setQueues(ROOT_QUEUE_PATH, new String[] {A, B});

    conf.setCapacity(A_QUEUE_PATH, 10);
    conf.setCapacity(B_QUEUE_PATH, 90);
    
    conf.setUserLimit(A_QUEUE_PATH, 50);
    conf.setUserLimitFactor(A_QUEUE_PATH, 5.0f);
    
    LOG.info("Setup top-level queues a and b");
  }
  /**
   * Builds a mock scheduler app that stubs only the accessors the limit logic
   * consults: application/attempt ids, user, priority, and the AM resource for
   * the default (no-label) partition. {@code compareInputOrderTo} calls the
   * real method so the app can live in ordered collections.
   */
  private FiCaSchedulerApp getMockApplication(int appId, String user,
      Resource amResource) {
    FiCaSchedulerApp application = mock(FiCaSchedulerApp.class);
    ApplicationAttemptId applicationAttemptId =
        TestUtils.getMockApplicationAttemptId(appId, 0);
    doReturn(applicationAttemptId.getApplicationId()).
        when(application).getApplicationId();
    doReturn(applicationAttemptId). when(application).getApplicationAttemptId();
    doReturn(user).when(application).getUser();
    doReturn(amResource).when(application).getAMResource();
    doReturn(Priority.newInstance(0)).when(application).getPriority();
    doReturn(CommonNodeLabelsManager.NO_LABEL).when(application)
        .getAppAMNodePartitionName();
    doReturn(amResource).when(application).getAMResource(
        CommonNodeLabelsManager.NO_LABEL);
    when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))).thenCallRealMethod();
    when(application.isRunnable()).thenReturn(true);
    return application;
  }
@Test
public void testAMResourceLimit() throws Exception {
final String user_0 = "user_0";
final String user_1 = "user_1";
// This uses the default 10% of cluster value for the max am resources
// which are allowed, at 80GB = 8GB for AM's at the queue level. The user
// am limit is 4G initially (based on the queue absolute capacity)
// when there is only 1 user, and drops to 2G (the userlimit) when there
// is a second user
Resource clusterResource = Resource.newInstance(80 * GB, 40);
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
queue = (LeafQueue) root.getChildQueues().stream().filter(
child -> child.getQueueName().equals(A))
.findFirst().orElseThrow(NoSuchElementException::new);
queue.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
ActiveUsersManager activeUsersManager = mock(ActiveUsersManager.class);
when(queue.getAbstractUsersManager()).thenReturn(activeUsersManager);
assertEquals(Resource.newInstance(8 * GB, 1),
queue.calculateAndGetAMResourceLimit());
assertEquals(Resource.newInstance(4 * GB, 1),
queue.getUserAMResourceLimit());
// Two apps for user_0, both start
int APPLICATION_ID = 0;
FiCaSchedulerApp app_0 = getMockApplication(APPLICATION_ID++, user_0,
Resource.newInstance(2 * GB, 1));
queue.submitApplicationAttempt(app_0, user_0);
assertEquals(1, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
when(activeUsersManager.getNumActiveUsers()).thenReturn(1);
FiCaSchedulerApp app_1 = getMockApplication(APPLICATION_ID++, user_0,
Resource.newInstance(2 * GB, 1));
queue.submitApplicationAttempt(app_1, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
// AMLimits unchanged
assertEquals(Resource.newInstance(8 * GB, 1), queue.getAMResourceLimit());
assertEquals(Resource.newInstance(4 * GB, 1),
queue.getUserAMResourceLimit());
// One app for user_1, starts
FiCaSchedulerApp app_2 = getMockApplication(APPLICATION_ID++, user_1,
Resource.newInstance(2 * GB, 1));
queue.submitApplicationAttempt(app_2, user_1);
assertEquals(3, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_1));
assertEquals(0, queue.getNumPendingApplications(user_1));
when(activeUsersManager.getNumActiveUsers()).thenReturn(2);
// Now userAMResourceLimit drops to the queue configured 50% as there is
// another user active
assertEquals(Resource.newInstance(8 * GB, 1), queue.getAMResourceLimit());
assertEquals(Resource.newInstance(2 * GB, 1),
queue.getUserAMResourceLimit());
// Second user_1 app cannot start
FiCaSchedulerApp app_3 = getMockApplication(APPLICATION_ID++, user_1,
Resource.newInstance(2 * GB, 1));
queue.submitApplicationAttempt(app_3, user_1);
assertEquals(3, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_1));
assertEquals(1, queue.getNumPendingApplications(user_1));
// Now finish app so another should be activated
queue.finishApplicationAttempt(app_2, A);
assertEquals(3, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_1));
assertEquals(0, queue.getNumPendingApplications(user_1));
}
@Test
public void testLimitsComputation() throws Exception {
final float epsilon = 1e-5f;
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
setupQueueConfiguration(csConf);
// Say cluster has 100 nodes of 16G each
Resource clusterResource =
Resources.createResource(100 * 16 * GB, 100 * 16);
CapacitySchedulerContext context = createCSContext(csConf, resourceCalculator,
Resources.createResource(GB, 1), Resources.createResource(16*GB, 16),
clusterResource);
CapacitySchedulerQueueManager queueManager = context.getCapacitySchedulerQueueManager();
CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(context);
CSQueueStore queues = new CSQueueStore();
CSQueue root =
CapacitySchedulerQueueManager.parseQueue(queueContext, csConf, null,
"root", queues, queues, TestUtils.spyHook);
queueManager.setRootQueue(root);
root.updateClusterResource(clusterResource,
new ResourceLimits(clusterResource));
LeafQueue queue = (LeafQueue)queues.get(A);
LOG.info("Queue 'A' -" +
" aMResourceLimit=" + queue.getAMResourceLimit() +
" UserAMResourceLimit=" +
queue.getUserAMResourceLimit());
Resource amResourceLimit = Resource.newInstance(160 * GB, 1);
assertThat(queue.calculateAndGetAMResourceLimit()).
isEqualTo(amResourceLimit);
assertThat(queue.getUserAMResourceLimit()).isEqualTo(
Resource.newInstance(80*GB, 1));
// Assert in metrics
assertThat(queue.getMetrics().getAMResourceLimitMB()).isEqualTo(
amResourceLimit.getMemorySize());
assertThat(queue.getMetrics().getAMResourceLimitVCores()).isEqualTo(
amResourceLimit.getVirtualCores());
assertEquals((int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
queue.getMetrics().getAvailableMB());
// Add some nodes to the cluster & test new limits
clusterResource = Resources.createResource(120 * 16 * GB);
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
assertThat(queue.calculateAndGetAMResourceLimit()).isEqualTo(
Resource.newInstance(192 * GB, 1));
assertThat(queue.getUserAMResourceLimit()).isEqualTo(
Resource.newInstance(96*GB, 1));
assertEquals((int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
queue.getMetrics().getAvailableMB());
// should return -1 if per queue setting not set
assertEquals(
(int)CapacitySchedulerConfiguration.UNDEFINED,
csConf.getMaximumApplicationsPerQueue(queue.getQueuePathObject()));
int expectedMaxApps =
(int)
(CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS *
queue.getAbsoluteCapacity());
assertEquals(expectedMaxApps, queue.getMaxApplications());
int expectedMaxAppsPerUser = Math.min(expectedMaxApps,
(int)(expectedMaxApps * (queue.getUserLimit()/100.0f) *
queue.getUserLimitFactor()));
assertEquals(expectedMaxAppsPerUser, queue.getMaxApplicationsPerUser());
// should default to global setting if per queue setting not set
assertEquals(CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,
csConf.getMaximumApplicationMasterResourcePerQueuePercent(
queue.getQueuePathObject()), epsilon);
// Change the per-queue max AM resources percentage.
csConf.setFloat(PREFIX + queue.getQueuePath()
+ ".maximum-am-resource-percent", 0.5f);
queueContext.reinitialize();
// Re-create queues to get new configs.
queues = new CSQueueStore();
root = CapacitySchedulerQueueManager.parseQueue(
queueContext, csConf, null, "root",
queues, queues, TestUtils.spyHook);
clusterResource = Resources.createResource(100 * 16 * GB);
queueManager.setRootQueue(root);
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
queue = (LeafQueue)queues.get(A);
assertEquals(0.5f,
csConf.getMaximumApplicationMasterResourcePerQueuePercent(
queue.getQueuePathObject()), epsilon);
assertThat(queue.calculateAndGetAMResourceLimit()).isEqualTo(
Resource.newInstance(800 * GB, 1));
assertThat(queue.getUserAMResourceLimit()).isEqualTo(
Resource.newInstance(400*GB, 1));
// Change the per-queue max applications.
csConf.setInt(PREFIX + queue.getQueuePath() + ".maximum-applications",
9999);
queueContext.reinitialize();
// Re-create queues to get new configs.
queues = new CSQueueStore();
root = CapacitySchedulerQueueManager.parseQueue(
queueContext, csConf, null, "root",
queues, queues, TestUtils.spyHook);
root.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
queue = (LeafQueue)queues.get(A);
assertEquals(9999, (int)csConf.getMaximumApplicationsPerQueue(
queue.getQueuePathObject()));
assertEquals(9999, queue.getMaxApplications());
expectedMaxAppsPerUser = Math.min(9999, (int)(9999 *
(queue.getUserLimit()/100.0f) * queue.getUserLimitFactor()));
assertEquals(expectedMaxAppsPerUser, queue.getMaxApplicationsPerUser());
}
@Test
public void testActiveApplicationLimits() throws Exception {
final String user_0 = "user_0";
final String user_1 = "user_1";
final String user_2 = "user_2";
assertEquals(Resource.newInstance(16 * GB, 1),
queue.calculateAndGetAMResourceLimit());
assertEquals(Resource.newInstance(8 * GB, 1),
queue.getUserAMResourceLimit());
int APPLICATION_ID = 0;
// Submit first application
FiCaSchedulerApp app_0 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_0, user_0);
assertEquals(1, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
// Submit second application
FiCaSchedulerApp app_1 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_1, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
// Submit third application, should remain pending due to user amlimit
FiCaSchedulerApp app_2 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_2, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
// Finish one application, app_2 should be activated
queue.finishApplicationAttempt(app_0, A);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
// Submit another one for user_0
FiCaSchedulerApp app_3 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_3, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
// Submit first app for user_1
FiCaSchedulerApp app_4 = getMockApplication(APPLICATION_ID++, user_1,
Resources.createResource(8 * GB, 0));
queue.submitApplicationAttempt(app_4, user_1);
assertEquals(3, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
assertEquals(1, queue.getNumActiveApplications(user_1));
assertEquals(0, queue.getNumPendingApplications(user_1));
// Submit first app for user_2, should block due to queue amlimit
FiCaSchedulerApp app_5 = getMockApplication(APPLICATION_ID++, user_2,
Resources.createResource(8 * GB, 0));
queue.submitApplicationAttempt(app_5, user_2);
assertEquals(3, queue.getNumActiveApplications());
assertEquals(2, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
assertEquals(1, queue.getNumActiveApplications(user_1));
assertEquals(0, queue.getNumPendingApplications(user_1));
assertEquals(1, queue.getNumPendingApplications(user_2));
// Now finish one app of user_1 so app_5 should be activated
queue.finishApplicationAttempt(app_4, A);
assertEquals(3, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
assertEquals(0, queue.getNumActiveApplications(user_1));
assertEquals(0, queue.getNumPendingApplications(user_1));
assertEquals(1, queue.getNumActiveApplications(user_2));
assertEquals(0, queue.getNumPendingApplications(user_2));
}
@Test
public void testActiveLimitsWithKilledApps() throws Exception {
final String user_0 = "user_0";
int APPLICATION_ID = 0;
// Submit first application
FiCaSchedulerApp app_0 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_0, user_0);
assertEquals(1, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
assertTrue(queue.getApplications().contains(app_0));
// Submit second application
FiCaSchedulerApp app_1 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_1, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
assertTrue(queue.getApplications().contains(app_1));
// Submit third application, should remain pending
FiCaSchedulerApp app_2 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_2, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
assertTrue(queue.getPendingApplications().contains(app_2));
// Submit fourth application, should remain pending
FiCaSchedulerApp app_3 = getMockApplication(APPLICATION_ID++, user_0,
Resources.createResource(4 * GB, 0));
queue.submitApplicationAttempt(app_3, user_0);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(2, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(2, queue.getNumPendingApplications(user_0));
assertTrue(queue.getPendingApplications().contains(app_3));
// Kill 3rd pending application
queue.finishApplicationAttempt(app_2, A);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(1, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(1, queue.getNumPendingApplications(user_0));
assertFalse(queue.getPendingApplications().contains(app_2));
assertFalse(queue.getApplications().contains(app_2));
// Finish 1st application, app_3 should become active
queue.finishApplicationAttempt(app_0, A);
assertEquals(2, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(2, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
assertTrue(queue.getApplications().contains(app_3));
assertFalse(queue.getPendingApplications().contains(app_3));
assertFalse(queue.getApplications().contains(app_0));
// Finish 2nd application
queue.finishApplicationAttempt(app_1, A);
assertEquals(1, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(1, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
assertFalse(queue.getApplications().contains(app_1));
// Finish 4th application
queue.finishApplicationAttempt(app_3, A);
assertEquals(0, queue.getNumActiveApplications());
assertEquals(0, queue.getNumPendingApplications());
assertEquals(0, queue.getNumActiveApplications(user_0));
assertEquals(0, queue.getNumPendingApplications(user_0));
assertFalse(queue.getApplications().contains(app_3));
}
@Test
public void testHeadroom() throws Exception {
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
csConf.setUserLimit(A_QUEUE_PATH, 25);
setupQueueConfiguration(csConf);
// Say cluster has 100 nodes of 16G each
Resource clusterResource = Resources.createResource(100 * 16 * GB);
CapacitySchedulerContext context = createCSContext(csConf, resourceCalculator,
Resources.createResource(GB), Resources.createResource(16*GB), clusterResource);
CapacitySchedulerQueueManager queueManager = context.getCapacitySchedulerQueueManager();
CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(context);
CSQueueStore queues = new CSQueueStore();
CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(queueContext,
csConf, null, "root", queues, queues, TestUtils.spyHook);
queueManager.setRootQueue(rootQueue);
rootQueue.updateClusterResource(clusterResource,
new ResourceLimits(clusterResource));
ResourceUsage queueCapacities = rootQueue.getQueueResourceUsage();
when(context.getClusterResourceUsage())
.thenReturn(queueCapacities);
// Manipulate queue 'a'
LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue)queues.get(A));
queue.updateClusterResource(clusterResource, new ResourceLimits(
clusterResource));
String host_0 = "host_0";
String rack_0 = "rack_0";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 16*GB);
final String user_0 = "user_0";
final String user_1 = "user_1";
RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
RMContext rmContext = TestUtils.getMockRMContext();
RMContext spyRMContext = spy(rmContext);
ConcurrentMap<ApplicationId, RMApp> spyApps =
spy(new ConcurrentHashMap<ApplicationId, RMApp>());
RMApp rmApp = mock(RMApp.class);
ResourceRequest amResourceRequest = mock(ResourceRequest.class);
Resource amResource = Resources.createResource(0, 0);
when(amResourceRequest.getCapability()).thenReturn(amResource);
when(rmApp.getAMResourceRequests()).thenReturn(
Collections.singletonList(amResourceRequest));
doReturn(rmApp)
.when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
when(spyRMContext.getRMApps()).thenReturn(spyApps);
RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
when(rmApp.getRMAppAttempt(any()))
.thenReturn(rmAppAttempt);
when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
doReturn(rmApp)
.when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
doReturn(true).when(spyApps)
.containsKey(ArgumentMatchers.<ApplicationId>any());
Priority priority_1 = TestUtils.createMockPriority(1);
// Submit first application with some resource-requests from user_0,
// and check headroom
final ApplicationAttemptId appAttemptId_0_0 =
TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(
appAttemptId_0_0, user_0, queue,
queue.getAbstractUsersManager(), spyRMContext);
queue.submitApplicationAttempt(app_0_0, user_0);
List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
app_0_0_requests.add(
TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2,
true, priority_1, recordFactory));
app_0_0.updateResourceRequests(app_0_0_requests);
// Schedule to compute
queue.assignContainers(clusterResource, node_0, new ResourceLimits(
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
Resource expectedHeadroom = Resources.createResource(5*16*GB, 1);
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
// Submit second application from user_0, check headroom
final ApplicationAttemptId appAttemptId_0_1 =
TestUtils.getMockApplicationAttemptId(1, 0);
FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(
appAttemptId_0_1, user_0, queue,
queue.getAbstractUsersManager(), spyRMContext);
queue.submitApplicationAttempt(app_0_1, user_0);
List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
app_0_1_requests.add(
TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2,
true, priority_1, recordFactory));
app_0_1.updateResourceRequests(app_0_1_requests);
// Schedule to compute
queue.assignContainers(clusterResource, node_0, new ResourceLimits(
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
assertEquals(expectedHeadroom, app_0_1.getHeadroom());// no change
// Submit first application from user_1, check for new headroom
final ApplicationAttemptId appAttemptId_1_0 =
TestUtils.getMockApplicationAttemptId(2, 0);
FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(
appAttemptId_1_0, user_1, queue,
queue.getAbstractUsersManager(), spyRMContext);
queue.submitApplicationAttempt(app_1_0, user_1);
List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
app_1_0_requests.add(
TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2,
true, priority_1, recordFactory));
app_1_0.updateResourceRequests(app_1_0_requests);
// Schedule to compute
queue.assignContainers(clusterResource, node_0, new ResourceLimits(
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
expectedHeadroom = Resources.createResource(10*16*GB / 2, 1); // changes
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
assertEquals(expectedHeadroom, app_0_1.getHeadroom());
assertEquals(expectedHeadroom, app_1_0.getHeadroom());
// Now reduce cluster size and check for the smaller headroom
clusterResource = Resources.createResource(90*16*GB);
rootQueue.updateClusterResource(clusterResource,
new ResourceLimits(clusterResource));
// Any change is cluster resource needs to enforce user-limit recomputation.
// In existing code, LeafQueue#updateClusterResource handled this. However
// here that method was not used.
queue.getUsersManager().userLimitNeedsRecompute();
queue.assignContainers(clusterResource, node_0, new ResourceLimits(
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
expectedHeadroom = Resources.createResource(9*16*GB / 2, 1); // changes
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
assertEquals(expectedHeadroom, app_0_1.getHeadroom());
assertEquals(expectedHeadroom, app_1_0.getHeadroom());
}
  /**
   * Builds a capacity-scheduler configuration with queues root.{a,b,c,d} and
   * a.{a1,a2,a3}, per-partition (x/y/z) capacities, and deliberately tight
   * application limits: a system-wide maximum of 4 applications, a queue
   * maximum of 1 for root.a.a1, and a 0.1 user-limit factor on root.d.
   * These limits are what the submission-rejection tests exercise.
   */
  private Configuration getConfigurationWithQueueLabels(Configuration config) {
    CapacitySchedulerConfiguration conf =
        new CapacitySchedulerConfiguration(config);

    // Define top-level
    conf.setQueues(ROOT_QUEUE_PATH,
        new String[]{"a", "b", "c", "d"});
    conf.setCapacityByLabel(ROOT_QUEUE_PATH, "x", 100);
    conf.setCapacityByLabel(ROOT_QUEUE_PATH, "y", 100);
    conf.setCapacityByLabel(ROOT_QUEUE_PATH, "z", 100);

    // Application limits: global per-queue max, a1 queue max, d user-limit
    // factor, and the system-wide application cap.
    conf.setInt(CapacitySchedulerConfiguration.QUEUE_GLOBAL_MAX_APPLICATION,
        20);
    conf.setInt("yarn.scheduler.capacity.root.a.a1.maximum-applications", 1);
    conf.setFloat("yarn.scheduler.capacity.root.d.user-limit-factor", 0.1f);
    conf.setInt("yarn.scheduler.capacity.maximum-applications", 4);

    // Default-partition capacities: a/b split 50/50, c and d get zero.
    conf.setQueues(A_QUEUE_PATH, new String[]{"a1", "a2", "a3"});
    conf.setCapacity(A_QUEUE_PATH, 50);
    conf.setCapacity(B_QUEUE_PATH, 50);
    conf.setCapacity(C_QUEUE_PATH, 0);
    conf.setCapacity(D_QUEUE_PATH, 0);
    conf.setCapacity(AA1_QUEUE_PATH, 50);
    conf.setCapacity(AA2_QUEUE_PATH, 50);
    conf.setCapacity(AA3_QUEUE_PATH, 0);

    // Per-partition capacities for labels y, x and z.
    conf.setCapacityByLabel(A_QUEUE_PATH, "y", 25);
    conf.setCapacityByLabel(B_QUEUE_PATH, "y", 50);
    conf.setCapacityByLabel(C_QUEUE_PATH, "y", 25);
    conf.setCapacityByLabel(D_QUEUE_PATH, "y", 0);
    conf.setCapacityByLabel(A_QUEUE_PATH, "x", 50);
    conf.setCapacityByLabel(B_QUEUE_PATH, "x", 50);
    conf.setCapacityByLabel(A_QUEUE_PATH, "z", 50);
    conf.setCapacityByLabel(B_QUEUE_PATH, "z", 50);
    conf.setCapacityByLabel(AA1_QUEUE_PATH, "x", 100);
    conf.setCapacityByLabel(AA2_QUEUE_PATH, "x", 0);
    conf.setCapacityByLabel(AA1_QUEUE_PATH, "y", 25);
    conf.setCapacityByLabel(AA2_QUEUE_PATH, "y", 75);
    conf.setCapacityByLabel(AA2_QUEUE_PATH, "z", 75);
    conf.setCapacityByLabel(AA3_QUEUE_PATH, "z", 25);
    return conf;
  }
private Set<String> toSet(String... elements) {
Set<String> set = Sets.newHashSet(elements);
return set;
}
  /**
   * End-to-end submission-limit test against a real {@link MockRM} with node
   * labels. Exercises, in order: acceptance in a queue with zero default
   * partition capacity, the queue-level maximum-applications limit on
   * root.a.a1 (max 1), the per-user limit on root.d (user-limit-factor 0.1),
   * and finally the system-wide maximum-applications cap of 4 — asserting
   * the exact diagnostic message produced for each rejection.
   */
  @Test
  @Timeout(value = 120)
  public void testApplicationLimitSubmit() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
        ResourceScheduler.class);
    RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
    mgr.init(conf);

    // set node -> label
    mgr.addToCluserNodeLabelsWithDefaultExclusivity(
        ImmutableSet.of("x", "y", "z"));

    // set mapping:
    // h1 -> x
    // h2 -> y
    mgr.addLabelsToNode(
        ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
    mgr.addLabelsToNode(
        ImmutableMap.of(NodeId.newInstance("h2", 0), toSet("y")));

    // inject node label manager
    MockRM rm = new MockRM(getConfigurationWithQueueLabels(conf)) {
      @Override
      public RMNodeLabelsManager createNodeLabelManager() {
        return mgr;
      }
    };
    rm.getRMContext().setNodeLabelManager(mgr);
    rm.start();
    MockNM nm1 = rm.registerNode("h1:1234", 4096);
    MockNM nm2 = rm.registerNode("h2:1234", 4096);
    MockNM nm3 = rm.registerNode("h3:1234", 4096);

    // Submit application to queue c where the default partition capacity is
    // zero
    RMApp app1 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("c")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
    assertEquals(RMAppState.ACCEPTED, app1.getState());
    // Kill it so it does not count against the system-wide app limit below.
    rm.killApp(app1.getApplicationId());

    // First submission to a1 is accepted (a1 allows one application).
    RMApp app2 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a1")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app2.getApplicationId(), RMAppState.ACCEPTED);
    assertEquals(RMAppState.ACCEPTED, app2.getState());

    // Check second application is rejected and based on queue level max
    // application app is rejected
    RMApp app3 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("a1")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app3.getApplicationId(), RMAppState.FAILED);
    assertEquals(RMAppState.FAILED, app3.getState());
    assertEquals(
        "org.apache.hadoop.security.AccessControlException: "
            + "Queue root.a.a1 already has 1 applications, cannot accept "
            + "submission of application: " + app3.getApplicationId(),
        app3.getDiagnostics().toString());

    // based on per user max app settings, app should be rejected instantly
    // (root.d has user-limit-factor 0.1, which rounds to 0 apps per user)
    RMApp app13 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user")
            .withAcls(null)
            .withQueue("d")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app13.getApplicationId(), RMAppState.FAILED);
    assertEquals(RMAppState.FAILED, app13.getState());
    assertEquals(
        "org.apache.hadoop.security.AccessControlException: Queue"
            + " root.d already has 0 applications from user user cannot"
            + " accept submission of application: " + app13.getApplicationId(),
        app13.getDiagnostics().toString());

    // Submissions from user2 to a2 are accepted until the system-wide cap
    // of 4 concurrent applications is reached.
    RMApp app11 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user2")
            .withAcls(null)
            .withQueue("a2")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app11.getApplicationId(), RMAppState.ACCEPTED);
    assertEquals(RMAppState.ACCEPTED, app11.getState());
    RMApp app12 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user2")
            .withAcls(null)
            .withQueue("a2")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app12.getApplicationId(), RMAppState.ACCEPTED);
    assertEquals(RMAppState.ACCEPTED, app12.getState());

    // based on system max limit application is rejected
    RMApp app14 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user2")
            .withAcls(null)
            .withQueue("a2")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app14.getApplicationId(), RMAppState.ACCEPTED);
    RMApp app15 = MockRMAppSubmitter.submit(rm,
        MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
            .withAppName("app")
            .withUser("user2")
            .withAcls(null)
            .withQueue("a2")
            .withWaitForAppAcceptedState(false)
            .build());
    rm.drainEvents();
    rm.waitForState(app15.getApplicationId(), RMAppState.FAILED);
    assertEquals(RMAppState.FAILED, app15.getState());
    assertEquals(
        "Maximum system application limit reached,cannot"
            + " accept submission of application: " + app15.getApplicationId(),
        app15.getDiagnostics().toString());

    rm.killApp(app2.getApplicationId());
    rm.killApp(app13.getApplicationId());
    rm.killApp(app14.getApplicationId());
    rm.stop();
  }
  /**
   * Test that the max AM limit is correct in the case where one resource is
   * depleted but the other is not, using DominantResourceCalculator. Even
   * when another queue has "borrowed" nearly all of this queue's memory, the
   * AM limit must never drop below the AM percent of the queue's guaranteed
   * capacity, for either resource dimension.
   */
  @Test
  public void testAMResourceLimitWithDRCAndFullParent() throws Exception {
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration();
    setupQueueConfiguration(csConf);
    csConf.setFloat(CapacitySchedulerConfiguration.
        MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.3f);

    // Total cluster resources.
    Resource clusterResource = Resources.createResource(100 * GB, 1000);
    CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(
        createCSContext(csConf, new DominantResourceCalculator(), Resources.createResource(GB),
            Resources.createResource(16 * GB), clusterResource));

    // Set up queue hierarchy.
    CSQueueStore queues = new CSQueueStore();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(queueContext,
        csConf, null, "root", queues, queues, TestUtils.spyHook);
    rootQueue.updateClusterResource(clusterResource,
        new ResourceLimits(clusterResource));

    // Queue "queueA" has a 30% capacity guarantee. The max pct of "queueA" that
    // can be used for AMs is 30%. So, 30% of <memory: 100GB, vCores: 1000> is
    // <memory: 30GB, vCores: 30>, which is the guaranteed capacity of "queueA".
    // 30% of that (rounded to the nearest 1GB) is <memory: 9GB, vCores: 9>. The
    // max AM queue limit should never be less than that for any resource.
    LeafQueue queueA = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get(A));
    // NOTE(review): the stubbed queue's setCapacity appears to take a
    // percentage (30.0f) that is later divided by 100 below — confirm this
    // matches the stub's contract before changing either side.
    queueA.setCapacity(30.0f);
    queueA.setUserLimitFactor(10f);
    queueA.setMaxAMResourcePerQueuePercent(0.3f);

    // Make sure "queueA" knows the total cluster resource.
    queueA.updateClusterResource(clusterResource, new ResourceLimits(
        clusterResource));

    // Get "queueA"'s guaranteed capacity (<memory: 30GB, vCores: 300>).
    Resource capacity =
        Resources.multiply(clusterResource, (queueA.getCapacity() / 100));

    // Limit is the actual resources available to "queueA". The following
    // simulates the case where a second queue ("queueB") has "borrowed" almost
    // all of "queueA"'s resources because "queueB" has a max capacity of 100%
    // and has gone well over its guaranteed capacity. In this case, "queueB"
    // has used 99GB of memory and used 505 vCores. This is to make vCores
    // dominant in the calculations for the available resources.
    when(queueA.getEffectiveCapacity(any())).thenReturn(capacity);
    Resource limit = Resource.newInstance(1024, 495);
    ResourceLimits currentResourceLimits =
        new ResourceLimits(limit, Resources.none());
    queueA.updateClusterResource(clusterResource, currentResourceLimits);

    Resource expectedAmLimit = Resources.multiply(capacity,
        queueA.getMaxAMResourcePerQueuePercent());
    Resource amLimit = queueA.calculateAndGetAMResourceLimit();
    assertTrue(amLimit.getMemorySize() >= expectedAmLimit.getMemorySize(),
        "AM memory limit is less than expected: Expected: " +
            expectedAmLimit.getMemorySize() + "; Computed: "
            + amLimit.getMemorySize());
    assertTrue(amLimit.getVirtualCores() >= expectedAmLimit.getVirtualCores(),
        "AM vCore limit is less than expected: Expected: " +
            expectedAmLimit.getVirtualCores() + "; Computed: "
            + amLimit.getVirtualCores());
  }
private CapacitySchedulerContext createCSContext(CapacitySchedulerConfiguration csConf,
ResourceCalculator rc, Resource minResource, Resource maxResource, Resource clusterResource) {
YarnConfiguration conf = new YarnConfiguration();
CapacitySchedulerContext context = mock(CapacitySchedulerContext.class);
when(context.getConfiguration()).thenReturn(csConf);
when(context.getConf()).thenReturn(conf);
when(context.getMinimumResourceCapability()).
thenReturn(minResource);
when(context.getMaximumResourceCapability()).
thenReturn(maxResource);
when(context.getResourceCalculator()).
thenReturn(rc);
CapacitySchedulerQueueManager queueManager = new CapacitySchedulerQueueManager(conf,
rmContext.getNodeLabelManager(), null);
when(context.getPreemptionManager()).thenReturn(new PreemptionManager());
when(context.getCapacitySchedulerQueueManager()).thenReturn(queueManager);
when(context.getRMContext()).thenReturn(rmContext);
when(context.getPreemptionManager()).thenReturn(new PreemptionManager());
setQueueHandler(context);
// Total cluster resources.
when(context.getClusterResource()).thenReturn(clusterResource);
return context;
}
}
| TestApplicationLimits |
java | apache__camel | components/camel-beanio/src/main/java/org/apache/camel/dataformat/beanio/BeanIOIterator.java | {
"start": 968,
"end": 2287
} | class ____ implements Iterator<Object>, Closeable {
private BeanReader reader;
private transient Object next;
private transient Object forceNext;
public BeanIOIterator(BeanReader reader) {
this.reader = reader;
this.next = next();
}
@Override
public void close() throws IOException {
if (reader != null) {
reader.close();
reader = null;
}
}
@Override
public boolean hasNext() {
return next != null;
}
@Override
public Object next() {
Object answer = next;
if (answer == null) {
answer = reader.read();
// after read we may force a next
if (forceNext != null) {
answer = forceNext;
forceNext = null;
}
} else {
next = reader.read();
// after read we may force a next
if (forceNext != null) {
next = forceNext;
forceNext = null;
}
}
return answer;
}
@Override
public void remove() {
// noop
}
/**
* Sets a custom object as the next, such as from a custom error handler
*/
public void setNext(Object next) {
this.forceNext = next;
}
}
| BeanIOIterator |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-client/runtime/src/test/java/io/quarkus/restclient/runtime/QuarkusRestClientBuilderTest.java | {
"start": 659,
"end": 3589
} | class ____ {
private static final String TLS_TRUST_ALL = "quarkus.tls.trust-all";
@Test
public void preservesCustomSslContextWhenTrustAllEnabled() throws Exception {
QuarkusRestClientBuilder builder = new QuarkusRestClientBuilder();
// set a mocked config that enables trust-all
Config mockConfig = mock(Config.class);
when(mockConfig.getOptionalValue(TLS_TRUST_ALL, Boolean.class)).thenReturn(Optional.of(Boolean.TRUE));
setQuarkusRestClientBuilderField(builder, "config", mockConfig);
// set a custom SSLContext on the builder
SSLContext custom = SSLContext.getInstance("TLS");
custom.init(null, null, new SecureRandom());
setQuarkusRestClientBuilderField(builder, "sslContext", custom);
ResteasyClientBuilder clientBuilder = mock(ResteasyClientBuilder.class);
// invoke private configureTrustAll method
Method m = QuarkusRestClientBuilder.class.getDeclaredMethod("configureTrustAll", ResteasyClientBuilder.class);
m.setAccessible(true);
m.invoke(builder, clientBuilder);
// hostname verifier should be set to NoopHostnameVerifier
verify(clientBuilder, times(1)).hostnameVerifier(any(NoopHostnameVerifier.class));
// but sslContext should NOT be overridden when the user provided one
verify(clientBuilder, never()).sslContext(any(SSLContext.class));
}
@Test
public void createsTrustAllSslContextWhenNoCustomProvided() throws Exception {
QuarkusRestClientBuilder builder = new QuarkusRestClientBuilder();
// set a mocked config that enables trust-all
Config mockConfig = mock(Config.class);
when(mockConfig.getOptionalValue(TLS_TRUST_ALL, Boolean.class)).thenReturn(Optional.of(Boolean.TRUE));
setQuarkusRestClientBuilderField(builder, "config", mockConfig);
// ensure sslContext field is null (no custom provided)
setQuarkusRestClientBuilderField(builder, "sslContext", null);
ResteasyClientBuilder clientBuilder = mock(ResteasyClientBuilder.class);
// invoke private configureTrustAll method
Method m = QuarkusRestClientBuilder.class.getDeclaredMethod("configureTrustAll", ResteasyClientBuilder.class);
m.setAccessible(true);
m.invoke(builder, clientBuilder);
// hostname verifier should be set to NoopHostnameVerifier
verify(clientBuilder, times(1)).hostnameVerifier(any(NoopHostnameVerifier.class));
// sslContext should be set to a newly created SSLContext
verify(clientBuilder, times(1)).sslContext(any(SSLContext.class));
}
private static void setQuarkusRestClientBuilderField(Object target, String name, Object value) throws Exception {
Field f = QuarkusRestClientBuilder.class.getDeclaredField(name);
f.setAccessible(true);
f.set(target, value);
}
}
| QuarkusRestClientBuilderTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/binding/annotations/embedded/InternetProvider.java | {
"start": 326,
"end": 814
} | class ____ {
private Integer id;
private String brandName;
private LegalStructure owner;
public String getBrandName() {
return brandName;
}
public void setBrandName(String brandName) {
this.brandName = brandName;
}
@Id
@GeneratedValue
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public LegalStructure getOwner() {
return owner;
}
public void setOwner(LegalStructure owner) {
this.owner = owner;
}
}
| InternetProvider |
java | mapstruct__mapstruct | processor/src/test/resources/fixtures/21/org/mapstruct/ap/test/bugs/_913/DomainDtoWithNcvsAlwaysMapperImpl.java | {
"start": 539,
"end": 7620
} | class ____ implements DomainDtoWithNcvsAlwaysMapper {
private final Helper helper = new Helper();
@Override
public Domain create(DtoWithPresenceCheck source) {
if ( source == null ) {
return null;
}
Domain domain = createNullDomain();
if ( source.hasStrings() ) {
List<String> list = source.getStrings();
domain.setStrings( new LinkedHashSet<String>( list ) );
}
if ( source.hasStrings() ) {
domain.setLongs( stringListToLongSet( source.getStrings() ) );
}
if ( source.hasStringsInitialized() ) {
List<String> list1 = source.getStringsInitialized();
domain.setStringsInitialized( new LinkedHashSet<String>( list1 ) );
}
if ( source.hasStringsInitialized() ) {
domain.setLongsInitialized( stringListToLongSet( source.getStringsInitialized() ) );
}
if ( source.hasStringsWithDefault() ) {
List<String> list2 = source.getStringsWithDefault();
domain.setStringsWithDefault( new ArrayList<String>( list2 ) );
}
else {
domain.setStringsWithDefault( helper.toList( "3" ) );
}
return domain;
}
@Override
public void update(DtoWithPresenceCheck source, Domain target) {
if ( source == null ) {
return;
}
if ( target.getStrings() != null ) {
if ( source.hasStrings() ) {
target.getStrings().clear();
target.getStrings().addAll( source.getStrings() );
}
}
else {
if ( source.hasStrings() ) {
List<String> list = source.getStrings();
target.setStrings( new LinkedHashSet<String>( list ) );
}
}
if ( target.getLongs() != null ) {
if ( source.hasStrings() ) {
target.getLongs().clear();
target.getLongs().addAll( stringListToLongSet( source.getStrings() ) );
}
}
else {
if ( source.hasStrings() ) {
target.setLongs( stringListToLongSet( source.getStrings() ) );
}
}
if ( target.getStringsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getStringsInitialized().clear();
target.getStringsInitialized().addAll( source.getStringsInitialized() );
}
}
else {
if ( source.hasStringsInitialized() ) {
List<String> list1 = source.getStringsInitialized();
target.setStringsInitialized( new LinkedHashSet<String>( list1 ) );
}
}
if ( target.getLongsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getLongsInitialized().clear();
target.getLongsInitialized().addAll( stringListToLongSet( source.getStringsInitialized() ) );
}
}
else {
if ( source.hasStringsInitialized() ) {
target.setLongsInitialized( stringListToLongSet( source.getStringsInitialized() ) );
}
}
if ( target.getStringsWithDefault() != null ) {
if ( source.hasStringsWithDefault() ) {
target.getStringsWithDefault().clear();
target.getStringsWithDefault().addAll( source.getStringsWithDefault() );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
else {
if ( source.hasStringsWithDefault() ) {
List<String> list2 = source.getStringsWithDefault();
target.setStringsWithDefault( new ArrayList<String>( list2 ) );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
}
@Override
public Domain updateWithReturn(DtoWithPresenceCheck source, Domain target) {
if ( source == null ) {
return target;
}
if ( target.getStrings() != null ) {
if ( source.hasStrings() ) {
target.getStrings().clear();
target.getStrings().addAll( source.getStrings() );
}
}
else {
if ( source.hasStrings() ) {
List<String> list = source.getStrings();
target.setStrings( new LinkedHashSet<String>( list ) );
}
}
if ( target.getLongs() != null ) {
if ( source.hasStrings() ) {
target.getLongs().clear();
target.getLongs().addAll( stringListToLongSet( source.getStrings() ) );
}
}
else {
if ( source.hasStrings() ) {
target.setLongs( stringListToLongSet( source.getStrings() ) );
}
}
if ( target.getStringsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getStringsInitialized().clear();
target.getStringsInitialized().addAll( source.getStringsInitialized() );
}
}
else {
if ( source.hasStringsInitialized() ) {
List<String> list1 = source.getStringsInitialized();
target.setStringsInitialized( new LinkedHashSet<String>( list1 ) );
}
}
if ( target.getLongsInitialized() != null ) {
if ( source.hasStringsInitialized() ) {
target.getLongsInitialized().clear();
target.getLongsInitialized().addAll( stringListToLongSet( source.getStringsInitialized() ) );
}
}
else {
if ( source.hasStringsInitialized() ) {
target.setLongsInitialized( stringListToLongSet( source.getStringsInitialized() ) );
}
}
if ( target.getStringsWithDefault() != null ) {
if ( source.hasStringsWithDefault() ) {
target.getStringsWithDefault().clear();
target.getStringsWithDefault().addAll( source.getStringsWithDefault() );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
else {
if ( source.hasStringsWithDefault() ) {
List<String> list2 = source.getStringsWithDefault();
target.setStringsWithDefault( new ArrayList<String>( list2 ) );
}
else {
target.setStringsWithDefault( helper.toList( "3" ) );
}
}
return target;
}
protected Set<Long> stringListToLongSet(List<String> list) {
if ( list == null ) {
return null;
}
Set<Long> set = LinkedHashSet.newLinkedHashSet( list.size() );
for ( String string : list ) {
set.add( Long.parseLong( string ) );
}
return set;
}
}
| DomainDtoWithNcvsAlwaysMapperImpl |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/LoopSectionHelper.java | {
"start": 653,
"end": 6530
} | class ____ implements SectionHelper {
private static final String DEFAULT_ALIAS = "it";
private static final String ELSE = "else";
private static final String ALIAS = "alias";
private static final String ITERABLE = "iterable";
private final String alias;
private final Expression iterable;
private final SectionBlock elseBlock;
private final Engine engine;
private final String metadataPrefix;
LoopSectionHelper(SectionInitContext context, String metadataPrefix) {
this.alias = context.getParameterOrDefault(ALIAS, DEFAULT_ALIAS);
this.metadataPrefix = LoopSectionHelper.Factory.prefixValue(alias, metadataPrefix);
this.iterable = Objects.requireNonNull(context.getExpression(ITERABLE));
this.elseBlock = context.getBlock(ELSE);
this.engine = context.getEngine();
}
public String getMetadataPrefix() {
return metadataPrefix;
}
@Override
public CompletionStage<ResultNode> resolve(SectionResolutionContext context) {
return context.resolutionContext().evaluate(iterable).thenCompose(it -> {
if (it == null) {
// Treat null as no-op, as it is handled by SingleResultNode
return ResultNode.NOOP;
}
// Try to extract the capacity for collections, maps and arrays to avoid resize
List<CompletionStage<ResultNode>> results = new ArrayList<>(extractSize(it));
Iterator<?> iterator = extractIterator(it);
int idx = 0;
// Ideally, we should not block here but we still need to retain the order of results
while (iterator.hasNext()) {
results.add(nextElement(iterator.next(), idx++, iterator.hasNext(), context));
}
if (results.isEmpty()) {
// Execute the {#else} block if present
if (elseBlock != null) {
return context.execute(elseBlock, context.resolutionContext());
} else {
return ResultNode.NOOP;
}
}
if (results.size() == 1) {
return results.get(0);
}
return Results.process(results);
});
}
private static int extractSize(Object it) {
// Note that we intentionally use "instanceof" to test interfaces as the last resort in order to mitigate the "type pollution"
// See https://github.com/RedHatPerf/type-pollution-agent for more information
if (it instanceof AbstractCollection<?> collection) {
return collection.size();
} else if (it instanceof AbstractMap<?, ?> map) {
return map.size();
} else if (it.getClass().isArray()) {
return Array.getLength(it);
} else if (it instanceof Integer integer) {
return integer;
} else if (it instanceof Long longValue) {
return longValue.intValue();
} else if (it instanceof Collection<?> collection) {
return collection.size();
} else if (it instanceof Map<?, ?> map) {
return map.size();
}
return 10;
}
private Iterator<?> extractIterator(Object it) {
// Note that we intentionally use "instanceof" to test interfaces as the last resort in order to mitigate the "type pollution"
// See https://github.com/RedHatPerf/type-pollution-agent for more information
if (it instanceof AbstractCollection<?> col) {
return col.iterator();
} else if (it instanceof AbstractMap<?, ?> map) {
return map.entrySet().iterator();
} else if (it instanceof Integer integer) {
return IntStream.rangeClosed(1, integer).iterator();
} else if (it instanceof Long longValue) {
return LongStream.rangeClosed(1, longValue).iterator();
} else if (it.getClass().isArray()) {
int length = Array.getLength(it);
List<Object> elements = new ArrayList<>(length);
for (int i = 0; i < length; i++) {
// The val is automatically wrapped for primitive types
elements.add(Array.get(it, i));
}
return elements.iterator();
} else if (it instanceof Iterable<?> iterable) {
return iterable.iterator();
} else if (it instanceof Iterator<?> iterator) {
return iterator;
} else if (it instanceof Map<?, ?> map) {
return map.entrySet().iterator();
} else if (it instanceof Stream<?> stream) {
return stream.sequential().iterator();
} else {
TemplateException.Builder builder;
if (Results.isNotFound(it)) {
builder = engine.error("Iteration error - \\{{expr}} not found, use \\{{expr}.orEmpty} to ignore this error")
.code(Code.ITERABLE_NOT_FOUND)
.argument("expr", iterable.toOriginalString())
.origin(iterable.getOrigin());
} else {
builder = engine.error("Iteration error - \\{{expr}} resolved to [{clazz}] which is not iterable")
.code(Code.NOT_AN_ITERABLE)
.argument("expr", iterable.toOriginalString())
.argument("clazz", it.getClass().getName())
.origin(iterable.getOrigin());
}
throw builder.build();
}
}
CompletionStage<ResultNode> nextElement(Object element, int index, boolean hasNext, SectionResolutionContext context) {
ResolutionContext child = context.resolutionContext().createChild(
new IterationElement(element, index, hasNext),
null);
return context.execute(child);
}
public static | LoopSectionHelper |
java | apache__flink | flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/embedded/EmbeddedPythonKeyedProcessOperator.java | {
"start": 6396,
"end": 7056
} | class ____ {
private final TimerService timerService;
ContextImpl(TimerService timerService) {
this.timerService = timerService;
}
public long timestamp() {
return timestamp;
}
public TimerService timerService() {
return timerService;
}
@SuppressWarnings("unchecked")
public Object getCurrentKey() {
return keyConverter.toExternal(
(K)
((Row) EmbeddedPythonKeyedProcessOperator.this.getCurrentKey())
.getField(0));
}
}
private | ContextImpl |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schemafilter/CatalogFilterTest.java | {
"start": 5588,
"end": 5842
} | class ____ {
@Id
private long id;
public long getId() {
return id;
}
public void setId( long id ) {
this.id = id;
}
}
@Entity
@jakarta.persistence.Table(name = "the_entity_3", catalog = "the_catalog_2")
public static | Catalog1Entity2 |
java | netty__netty | transport/src/main/java/io/netty/channel/socket/nio/NioDatagramChannel.java | {
"start": 2387,
"end": 22421
} | class ____
extends AbstractNioMessageChannel implements io.netty.channel.socket.DatagramChannel {
private static final ChannelMetadata METADATA = new ChannelMetadata(true, 16);
private static final SelectorProvider DEFAULT_SELECTOR_PROVIDER = SelectorProvider.provider();
private static final String EXPECTED_TYPES =
" (expected: " + StringUtil.simpleClassName(DatagramPacket.class) + ", " +
StringUtil.simpleClassName(AddressedEnvelope.class) + '<' +
StringUtil.simpleClassName(ByteBuf.class) + ", " +
StringUtil.simpleClassName(SocketAddress.class) + ">, " +
StringUtil.simpleClassName(ByteBuf.class) + ')';
private final DatagramChannelConfig config;
private Map<InetAddress, List<MembershipKey>> memberships;
/**
* Use the {@link SelectorProvider} to open {@link DatagramChannel} and so remove condition in
* {@link SelectorProvider#provider()} which is called by each DatagramChannel.open() otherwise.
* <p>
* See <a href="https://github.com/netty/netty/issues/2308">#2308</a>.
*/
private static DatagramChannel newSocket(SelectorProvider provider) {
try {
return provider.openDatagramChannel();
} catch (IOException e) {
throw new ChannelException("Failed to open a socket.", e);
}
}
private static DatagramChannel newSocket(SelectorProvider provider, SocketProtocolFamily ipFamily) {
if (ipFamily == null) {
return newSocket(provider);
}
try {
return provider.openDatagramChannel(ipFamily.toJdkFamily());
} catch (IOException e) {
throw new ChannelException("Failed to open a socket.", e);
}
}
/**
* Create a new instance which will use the Operation Systems default {@link SocketProtocolFamily}.
*/
public NioDatagramChannel() {
this(newSocket(DEFAULT_SELECTOR_PROVIDER));
}
/**
* Create a new instance using the given {@link SelectorProvider}
* which will use the Operation Systems default {@link SocketProtocolFamily}.
*/
public NioDatagramChannel(SelectorProvider provider) {
this(newSocket(provider));
}
/**
* Create a new instance using the given {@link InternetProtocolFamily}. If {@code null} is used it will depend
* on the Operation Systems default which will be chosen.
*
* @deprecated use {@link NioDatagramChannel#NioDatagramChannel(SocketProtocolFamily)}
*/
@Deprecated
public NioDatagramChannel(InternetProtocolFamily ipFamily) {
this(ipFamily == null ? null : ipFamily.toSocketProtocolFamily());
}
/**
* Create a new instance using the given {@link SocketProtocolFamily}. If {@code null} is used it will depend
* on the Operation Systems default which will be chosen.
*/
public NioDatagramChannel(SocketProtocolFamily protocolFamily) {
this(newSocket(DEFAULT_SELECTOR_PROVIDER, protocolFamily));
}
/**
* Create a new instance using the given {@link SelectorProvider} and {@link InternetProtocolFamily}.
* If {@link InternetProtocolFamily} is {@code null} it will depend on the Operation Systems default
* which will be chosen.
*
* @deprecated use {@link NioDatagramChannel#NioDatagramChannel(SelectorProvider, SocketProtocolFamily)}
*/
@Deprecated
public NioDatagramChannel(SelectorProvider provider, InternetProtocolFamily ipFamily) {
this(provider, ipFamily == null ? null : ipFamily.toSocketProtocolFamily());
}
/**
* Create a new instance using the given {@link SelectorProvider} and {@link SocketProtocolFamily}.
* If {@link SocketProtocolFamily} is {@code null} it will depend on the Operation Systems default
* which will be chosen.
*/
public NioDatagramChannel(SelectorProvider provider, SocketProtocolFamily protocolFamily) {
this(newSocket(provider, protocolFamily));
}
/**
* Create a new instance from the given {@link DatagramChannel}.
*/
public NioDatagramChannel(DatagramChannel socket) {
super(null, socket, SelectionKey.OP_READ);
config = new NioDatagramChannelConfig(this, socket);
}
@Override
public ChannelMetadata metadata() {
return METADATA;
}
@Override
public DatagramChannelConfig config() {
return config;
}
@Override
@SuppressWarnings("deprecation")
public boolean isActive() {
DatagramChannel ch = javaChannel();
return ch.isOpen() && (
config.getOption(ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION) && isRegistered()
|| ch.socket().isBound());
}
@Override
public boolean isConnected() {
return javaChannel().isConnected();
}
@Override
protected DatagramChannel javaChannel() {
return (DatagramChannel) super.javaChannel();
}
@Override
protected SocketAddress localAddress0() {
return javaChannel().socket().getLocalSocketAddress();
}
@Override
protected SocketAddress remoteAddress0() {
return javaChannel().socket().getRemoteSocketAddress();
}
@Override
protected void doBind(SocketAddress localAddress) throws Exception {
doBind0(localAddress);
}
private void doBind0(SocketAddress localAddress) throws Exception {
SocketUtils.bind(javaChannel(), localAddress);
}
@Override
protected boolean doConnect(SocketAddress remoteAddress,
SocketAddress localAddress) throws Exception {
if (localAddress != null) {
doBind0(localAddress);
}
boolean success = false;
try {
javaChannel().connect(remoteAddress);
success = true;
return true;
} finally {
if (!success) {
doClose();
}
}
}
@Override
protected void doFinishConnect() throws Exception {
throw new UnsupportedOperationException("finishConnect is not supported for " + getClass().getName());
}
@Override
protected void doDisconnect() throws Exception {
javaChannel().disconnect();
}
@Override
protected void doClose() throws Exception {
javaChannel().close();
}
@Override
protected int doReadMessages(List<Object> buf) throws Exception {
DatagramChannel ch = javaChannel();
DatagramChannelConfig config = config();
RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle();
ByteBuf data = allocHandle.allocate(config.getAllocator());
allocHandle.attemptedBytesRead(data.writableBytes());
boolean free = true;
try {
ByteBuffer nioData = data.internalNioBuffer(data.writerIndex(), data.writableBytes());
int pos = nioData.position();
InetSocketAddress remoteAddress = (InetSocketAddress) ch.receive(nioData);
if (remoteAddress == null) {
return 0;
}
allocHandle.lastBytesRead(nioData.position() - pos);
buf.add(new DatagramPacket(data.writerIndex(data.writerIndex() + allocHandle.lastBytesRead()),
localAddress(), remoteAddress));
free = false;
return 1;
} catch (Throwable cause) {
PlatformDependent.throwException(cause);
return -1;
} finally {
if (free) {
data.release();
}
}
}
@Override
protected boolean doWriteMessage(Object msg, ChannelOutboundBuffer in) throws Exception {
final SocketAddress remoteAddress;
final ByteBuf data;
if (msg instanceof AddressedEnvelope) {
@SuppressWarnings("unchecked")
AddressedEnvelope<ByteBuf, SocketAddress> envelope = (AddressedEnvelope<ByteBuf, SocketAddress>) msg;
remoteAddress = envelope.recipient();
data = envelope.content();
} else {
data = (ByteBuf) msg;
remoteAddress = null;
}
final int dataLen = data.readableBytes();
if (dataLen == 0) {
return true;
}
final ByteBuffer nioData = data.nioBufferCount() == 1 ? data.internalNioBuffer(data.readerIndex(), dataLen)
: data.nioBuffer(data.readerIndex(), dataLen);
final int writtenBytes;
if (remoteAddress != null) {
writtenBytes = javaChannel().send(nioData, remoteAddress);
} else {
writtenBytes = javaChannel().write(nioData);
}
return writtenBytes > 0;
}
private static void checkUnresolved(AddressedEnvelope<?, ?> envelope) {
if (envelope.recipient() instanceof InetSocketAddress
&& (((InetSocketAddress) envelope.recipient()).isUnresolved())) {
throw new UnresolvedAddressException();
}
}
@Override
protected Object filterOutboundMessage(Object msg) {
if (msg instanceof DatagramPacket) {
DatagramPacket p = (DatagramPacket) msg;
checkUnresolved(p);
ByteBuf content = p.content();
if (isSingleDirectBuffer(content)) {
return p;
}
return new DatagramPacket(newDirectBuffer(p, content), p.recipient());
}
if (msg instanceof ByteBuf) {
ByteBuf buf = (ByteBuf) msg;
if (isSingleDirectBuffer(buf)) {
return buf;
}
return newDirectBuffer(buf);
}
if (msg instanceof AddressedEnvelope) {
@SuppressWarnings("unchecked")
AddressedEnvelope<Object, SocketAddress> e = (AddressedEnvelope<Object, SocketAddress>) msg;
checkUnresolved(e);
if (e.content() instanceof ByteBuf) {
ByteBuf content = (ByteBuf) e.content();
if (isSingleDirectBuffer(content)) {
return e;
}
return new DefaultAddressedEnvelope<ByteBuf, SocketAddress>(newDirectBuffer(e, content), e.recipient());
}
}
throw new UnsupportedOperationException(
"unsupported message type: " + StringUtil.simpleClassName(msg) + EXPECTED_TYPES);
}
/**
* Checks if the specified buffer is a direct buffer and is composed of a single NIO buffer.
* (We check this because otherwise we need to make it a non-composite buffer.)
*/
private static boolean isSingleDirectBuffer(ByteBuf buf) {
return buf.isDirect() && buf.nioBufferCount() == 1;
}
@Override
protected boolean continueOnWriteError() {
// Continue on write error as a DatagramChannel can write to multiple remote peers
//
// See https://github.com/netty/netty/issues/2665
return true;
}
@Override
public InetSocketAddress localAddress() {
return (InetSocketAddress) super.localAddress();
}
@Override
public InetSocketAddress remoteAddress() {
return (InetSocketAddress) super.remoteAddress();
}
@Override
public ChannelFuture joinGroup(InetAddress multicastAddress) {
return joinGroup(multicastAddress, newPromise());
}
@Override
public ChannelFuture joinGroup(InetAddress multicastAddress, ChannelPromise promise) {
try {
NetworkInterface iface = config.getNetworkInterface();
if (iface == null) {
iface = NetworkInterface.getByInetAddress(localAddress().getAddress());
}
return joinGroup(
multicastAddress, iface, null, promise);
} catch (SocketException e) {
promise.setFailure(e);
}
return promise;
}
@Override
public ChannelFuture joinGroup(
InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
return joinGroup(multicastAddress, networkInterface, newPromise());
}
@Override
public ChannelFuture joinGroup(
InetSocketAddress multicastAddress, NetworkInterface networkInterface,
ChannelPromise promise) {
return joinGroup(multicastAddress.getAddress(), networkInterface, null, promise);
}
@Override
public ChannelFuture joinGroup(
InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
return joinGroup(multicastAddress, networkInterface, source, newPromise());
}
@Override
public ChannelFuture joinGroup(
InetAddress multicastAddress, NetworkInterface networkInterface,
InetAddress source, ChannelPromise promise) {
ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
ObjectUtil.checkNotNull(networkInterface, "networkInterface");
try {
MembershipKey key;
if (source == null) {
key = javaChannel().join(multicastAddress, networkInterface);
} else {
key = javaChannel().join(multicastAddress, networkInterface, source);
}
synchronized (this) {
List<MembershipKey> keys = null;
if (memberships == null) {
memberships = new HashMap<InetAddress, List<MembershipKey>>();
} else {
keys = memberships.get(multicastAddress);
}
if (keys == null) {
keys = new ArrayList<MembershipKey>();
memberships.put(multicastAddress, keys);
}
keys.add(key);
}
promise.setSuccess();
} catch (Throwable e) {
promise.setFailure(e);
}
return promise;
}
@Override
public ChannelFuture leaveGroup(InetAddress multicastAddress) {
return leaveGroup(multicastAddress, newPromise());
}
@Override
public ChannelFuture leaveGroup(InetAddress multicastAddress, ChannelPromise promise) {
try {
return leaveGroup(
multicastAddress, NetworkInterface.getByInetAddress(localAddress().getAddress()), null, promise);
} catch (SocketException e) {
promise.setFailure(e);
}
return promise;
}
@Override
public ChannelFuture leaveGroup(
InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
return leaveGroup(multicastAddress, networkInterface, newPromise());
}
@Override
public ChannelFuture leaveGroup(
InetSocketAddress multicastAddress,
NetworkInterface networkInterface, ChannelPromise promise) {
return leaveGroup(multicastAddress.getAddress(), networkInterface, null, promise);
}
@Override
public ChannelFuture leaveGroup(
InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
return leaveGroup(multicastAddress, networkInterface, source, newPromise());
}
@Override
public ChannelFuture leaveGroup(
InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source,
ChannelPromise promise) {
ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
ObjectUtil.checkNotNull(networkInterface, "networkInterface");
synchronized (this) {
if (memberships != null) {
List<MembershipKey> keys = memberships.get(multicastAddress);
if (keys != null) {
Iterator<MembershipKey> keyIt = keys.iterator();
while (keyIt.hasNext()) {
MembershipKey key = keyIt.next();
if (networkInterface.equals(key.networkInterface())) {
if (source == null && key.sourceAddress() == null ||
source != null && source.equals(key.sourceAddress())) {
key.drop();
keyIt.remove();
}
}
}
if (keys.isEmpty()) {
memberships.remove(multicastAddress);
}
}
}
}
promise.setSuccess();
return promise;
}
/**
* Block the given sourceToBlock address for the given multicastAddress on the given networkInterface
*/
@Override
public ChannelFuture block(
InetAddress multicastAddress, NetworkInterface networkInterface,
InetAddress sourceToBlock) {
return block(multicastAddress, networkInterface, sourceToBlock, newPromise());
}
/**
* Block the given sourceToBlock address for the given multicastAddress on the given networkInterface
*/
@Override
public ChannelFuture block(
InetAddress multicastAddress, NetworkInterface networkInterface,
InetAddress sourceToBlock, ChannelPromise promise) {
ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
ObjectUtil.checkNotNull(sourceToBlock, "sourceToBlock");
ObjectUtil.checkNotNull(networkInterface, "networkInterface");
synchronized (this) {
if (memberships != null) {
List<MembershipKey> keys = memberships.get(multicastAddress);
for (MembershipKey key: keys) {
if (networkInterface.equals(key.networkInterface())) {
try {
key.block(sourceToBlock);
} catch (IOException e) {
promise.setFailure(e);
}
}
}
}
}
promise.setSuccess();
return promise;
}
/**
* Block the given sourceToBlock address for the given multicastAddress
*
*/
@Override
public ChannelFuture block(InetAddress multicastAddress, InetAddress sourceToBlock) {
return block(multicastAddress, sourceToBlock, newPromise());
}
/**
* Block the given sourceToBlock address for the given multicastAddress
*
*/
@Override
public ChannelFuture block(
InetAddress multicastAddress, InetAddress sourceToBlock, ChannelPromise promise) {
try {
return block(
multicastAddress,
NetworkInterface.getByInetAddress(localAddress().getAddress()),
sourceToBlock, promise);
} catch (SocketException e) {
promise.setFailure(e);
}
return promise;
}
@Override
@Deprecated
protected void setReadPending(boolean readPending) {
super.setReadPending(readPending);
}
void clearReadPending0() {
clearReadPending();
}
@Override
protected boolean closeOnReadError(Throwable cause) {
// We do not want to close on SocketException when using DatagramChannel as we usually can continue receiving.
// See https://github.com/netty/netty/issues/5893
if (cause instanceof SocketException) {
return false;
}
return super.closeOnReadError(cause);
}
@Override
protected boolean continueReading(RecvByteBufAllocator.Handle allocHandle) {
if (allocHandle instanceof RecvByteBufAllocator.ExtendedHandle) {
// We use the TRUE_SUPPLIER as it is also ok to read less then what we did try to read (as long
// as we read anything).
return ((RecvByteBufAllocator.ExtendedHandle) allocHandle)
.continueReading(UncheckedBooleanSupplier.TRUE_SUPPLIER);
}
return allocHandle.continueReading();
}
}
| NioDatagramChannel |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/singlepersistenceunit/SinglePersistenceUnitCdiSessionTest.java | {
"start": 676,
"end": 2643
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(DefaultEntity.class)
.addAsResource("application.properties"));
@Inject
Session session;
@Test
@Transactional
public void inTransaction() {
DefaultEntity defaultEntity = new DefaultEntity("default");
session.persist(defaultEntity);
DefaultEntity savedDefaultEntity = session.get(DefaultEntity.class, defaultEntity.getId());
assertEquals(defaultEntity.getName(), savedDefaultEntity.getName());
}
@Test
@ActivateRequestContext
public void inRequestNoTransaction() {
// Reads are allowed
assertThatCode(() -> session.createQuery("select count(*) from DefaultEntity"))
.doesNotThrowAnyException();
// Writes are not
DefaultEntity defaultEntity = new DefaultEntity("default");
assertThatThrownBy(() -> session.persist(defaultEntity))
.isInstanceOf(TransactionRequiredException.class)
.hasMessageContaining(
"Transaction is not active, consider adding @Transactional to your method to automatically activate one");
}
@Test
public void noRequestNoTransaction() {
DefaultEntity defaultEntity = new DefaultEntity("default");
assertThatThrownBy(() -> session.persist(defaultEntity))
.isInstanceOf(ContextNotActiveException.class)
.hasMessageContainingAll(
"Cannot use the EntityManager/Session because neither a transaction nor a CDI request context is active",
"Consider adding @Transactional to your method to automatically activate a transaction",
"@ActivateRequestContext if you have valid reasons not to use transactions");
}
}
| SinglePersistenceUnitCdiSessionTest |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-triple/src/test/java/org/apache/dubbo/rpc/protocol/tri/rest/filter/TestRestFilterFactory.java | {
"start": 964,
"end": 1176
} | class ____ implements RestExtension, Supplier<RestFilter> {
@Override
public RestFilter get() {
return new TestRestFilter(100, "/filter/*", "/*.filter", "!/filter/one");
}
}
| TestRestFilterFactory |
java | playframework__playframework | documentation/manual/working/javaGuide/main/forms/code/javaguide/forms/JavaForms.java | {
"start": 15365,
"end": 16628
} | class ____ extends MockJavaAction {
private final MessagesApi messagesApi;
PartialFormLoginController(
JavaHandlerComponents javaHandlerComponents, MessagesApi messagesApi) {
super(javaHandlerComponents);
this.messagesApi = messagesApi;
}
public Result index(Http.Request request) {
// #partial-validate-login
Form<PartialUserForm> form =
formFactory().form(PartialUserForm.class, LoginCheck.class).bindFromRequest(request);
// #partial-validate-login
Messages messages = this.messagesApi.preferred(request);
if (form.hasErrors()) {
return badRequest(javaguide.forms.html.view.render(form, messages));
} else {
PartialUserForm user = form.get();
return ok("Got user " + user);
}
}
}
@Test
public void partialFormDefaultValidation() {
Result result =
call(
new PartialFormDefaultController(
instanceOf(JavaHandlerComponents.class), instanceOf(MessagesApi.class)),
fakeRequest("POST", "/").bodyForm(ImmutableMap.of()),
mat);
// Run it through the template
assertThat(contentAsString(result)).contains("This field is required");
}
public | PartialFormLoginController |
java | netty__netty | example/src/main/java/io/netty/example/http2/tiles/Launcher.java | {
"start": 1389,
"end": 1981
} | class ____ {
public static void main(String[] args) {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
Http2Server http2 = new Http2Server(group);
HttpServer http = new HttpServer(group);
try {
http2.start();
System.err.println("Open your web browser and navigate to " + "http://" + Html.IP + ":" + HttpServer.PORT);
http.start().sync();
} catch (Exception e) {
e.printStackTrace();
} finally {
group.shutdownGracefully();
}
}
}
| Launcher |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/flush/Book.java | {
"start": 457,
"end": 1152
} | class ____ {
private Long id;
private String title;
private Author author;
public Book() {
}
public Book(String title, Author author) {
this.title = title;
this.author = author;
}
@Id
@GeneratedValue( generator = "increment" )
@GenericGenerator( name = "increment", strategy = "increment" )
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@Column(name="`title`")
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
@ManyToOne( cascade = CascadeType.ALL )
public Author getAuthor() {
return author;
}
public void setAuthor(Author author) {
this.author = author;
}
}
| Book |
java | apache__maven | compat/maven-model-builder/src/main/java/org/apache/maven/model/composition/DefaultDependencyManagementImporter.java | {
"start": 1504,
"end": 2832
} | class ____ implements DependencyManagementImporter {
@Override
public void importManagement(
Model target,
List<? extends DependencyManagement> sources,
ModelBuildingRequest request,
ModelProblemCollector problems) {
if (sources != null && !sources.isEmpty()) {
Map<String, Dependency> dependencies = new LinkedHashMap<>();
DependencyManagement depMgmt = target.getDependencyManagement();
if (depMgmt != null) {
for (Dependency dependency : depMgmt.getDependencies()) {
dependencies.put(dependency.getManagementKey(), dependency);
}
} else {
depMgmt = new DependencyManagement();
target.setDependencyManagement(depMgmt);
}
for (DependencyManagement source : sources) {
for (Dependency dependency : source.getDependencies()) {
String key = dependency.getManagementKey();
if (!dependencies.containsKey(key)) {
dependencies.put(key, dependency);
}
}
}
depMgmt.setDependencies(new ArrayList<>(dependencies.values()));
}
}
}
| DefaultDependencyManagementImporter |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/internal/StatefulPersistenceContext.java | {
"start": 61280,
"end": 71943
} | interface ____<E> {
void serialize(E element, ObjectOutputStream oos) throws IOException;
}
private <K, V> void writeMapToStream(
Map<K, V> map,
ObjectOutputStream oos,
String keysName,
Serializer<Entry<K, V>> serializer) throws IOException {
if ( map == null ) {
oos.writeInt( 0 );
}
else {
writeCollectionToStream( map.entrySet(), oos, keysName, serializer );
}
}
private <E> void writeCollectionToStream(
Collection<E> collection,
ObjectOutputStream oos,
String keysName,
Serializer<E> serializer) throws IOException {
if ( collection == null ) {
oos.writeInt( 0 );
}
else {
final int size = collection.size();
oos.writeInt( size );
PERSISTENCE_CONTEXT_LOGGER.startingSerializationOfEntries( size, keysName );
for ( E entry : collection ) {
serializer.serialize( entry, oos );
}
}
}
/**
* Used by the owning session to explicitly control deserialization of the persistence context.
*
* @param ois The stream from which the persistence context should be read
* @param session The owning session
*
* @return The deserialized StatefulPersistenceContext
*
* @throws IOException deserialization errors.
* @throws ClassNotFoundException deserialization errors.
*/
public static StatefulPersistenceContext deserialize(ObjectInputStream ois, SessionImplementor session)
throws IOException, ClassNotFoundException {
PERSISTENCE_CONTEXT_LOGGER.deserializingPersistenceContext();
final var context = new StatefulPersistenceContext( session );
final var factory = session.getFactory();
// during deserialization, we need to reconnect all proxies and
// collections to this session, as well as the EntityEntry and
// CollectionEntry instances; these associations are transient
// because serialization is used for different things.
try {
context.defaultReadOnly = ois.readBoolean();
// todo : we can actually just determine this from the incoming EntityEntry-s
context.hasNonReadOnlyEntities = ois.readBoolean();
final boolean traceEnabled = PERSISTENCE_CONTEXT_LOGGER.isTraceEnabled();
{
int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "entitiesByUniqueKey" );
}
if ( count != 0 ) {
context.entitiesByUniqueKey = mapOfSize( Math.max( count, INIT_COLL_SIZE ) );
for ( int i = 0; i < count; i++ ) {
context.entitiesByUniqueKey.put( EntityUniqueKey.deserialize( ois, session ), ois.readObject() );
}
}
}
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "entitySnapshotsByKey" );
}
context.entitySnapshotsByKey = mapOfSize( Math.max( count, INIT_COLL_SIZE ) );
for ( int i = 0; i < count; i++ ) {
context.entitySnapshotsByKey.put( EntityKey.deserialize( ois, factory ), ois.readObject() );
}
}
context.entityEntryContext = EntityEntryContext.deserialize( ois, context );
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "entitiesByKey" );
}
context.entitiesByKey = mapOfSize( Math.max( count, INIT_COLL_SIZE ) );
final var metamodel = factory.getMappingMetamodel();
for ( int i = 0; i < count; i++ ) {
final var entityKey = EntityKey.deserialize( ois, factory );
final var persister = metamodel.getEntityDescriptor( (String) ois.readObject() );
final Object entity = ois.readObject();
final Object proxy = ois.readObject();
final var state = (EntityHolderState) ois.readObject();
final var holder = new EntityHolderImpl().withEntity( entityKey, persister, entity );
holder.state = state;
if ( proxy != null ) {
final var lazyInitializer = extractLazyInitializer( proxy );
if ( lazyInitializer != null ) {
lazyInitializer.setSession( session );
holder.proxy = proxy;
}
else {
// otherwise, the proxy was pruned during the serialization process
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.encounteredPrunedProxy();
}
}
}
holder.setEntityEntry( context.entityEntryContext.getEntityEntry( entity ) );
context.entitiesByKey.put( entityKey, holder );
}
}
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "collectionsByKey" );
}
context.collectionsByKey = mapOfSize( Math.max( count, INIT_COLL_SIZE ) );
for ( int i = 0; i < count; i++ ) {
context.collectionsByKey.put( CollectionKey.deserialize( ois, session ),
(PersistentCollection<?>) ois.readObject() );
}
}
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "collectionEntries" );
}
for ( int i = 0; i < count; i++ ) {
final var collection = (PersistentCollection<?>) ois.readObject();
collection.setCurrentSession( session );
context.putCollectionEntry( collection,
CollectionEntry.deserialize( ois, session ) );
}
}
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "arrayHolders" );
}
if ( count != 0 ) {
context.arrayHolders = new IdentityHashMap<>( Math.max( count, INIT_COLL_SIZE ) );
for ( int i = 0; i < count; i++ ) {
context.arrayHolders.put( ois.readObject(),
(PersistentCollection<?>) ois.readObject() );
}
}
}
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "nullifiableEntityKey" );
}
context.nullifiableEntityKeys = new HashSet<>();
for ( int i = 0; i < count; i++ ) {
context.nullifiableEntityKeys.add( EntityKey.deserialize( ois, factory ) );
}
}
{
final int count = ois.readInt();
if ( traceEnabled ) {
PERSISTENCE_CONTEXT_LOGGER.startingDeserializationOfEntries( count, "deletedUnloadedEntityKeys" );
}
context.deletedUnloadedEntityKeys = new HashSet<>();
for ( int i = 0; i < count; i++ ) {
context.deletedUnloadedEntityKeys.add( EntityKey.deserialize( ois, factory ) );
}
}
}
catch ( HibernateException he ) {
throw new InvalidObjectException( he.getMessage() );
}
return context;
}
@Override
public void addChildParent(Object child, Object parent) {
if ( parentsByChild == null ) {
parentsByChild = new IdentityHashMap<>( INIT_COLL_SIZE );
}
parentsByChild.put( child, parent );
}
@Override
public void removeChildParent(Object child) {
if ( parentsByChild != null ) {
parentsByChild.remove( child );
}
}
// INSERTED KEYS HANDLING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
private HashMap<String,HashSet<Object>> insertedKeysMap;
@Override
public void registerInsertedKey(EntityPersister persister, Object id) {
// we only are worried about registering these if the persister defines caching
if ( persister.canWriteToCache() ) {
if ( insertedKeysMap == null ) {
insertedKeysMap = new HashMap<>();
}
final var insertedEntityIds =
insertedKeysMap.computeIfAbsent( persister.getRootEntityName(),
k -> new HashSet<>() );
insertedEntityIds.add( id );
}
}
@Override
public boolean wasInsertedDuringTransaction(EntityPersister persister, Object id) {
// again, we only really care if the entity is cached
if ( persister.canWriteToCache() ) {
if ( insertedKeysMap != null ) {
final var insertedEntityIds =
insertedKeysMap.get( persister.getRootEntityName() );
if ( insertedEntityIds != null ) {
return insertedEntityIds.contains( id );
}
}
}
return false;
}
@Override
public boolean containsNullifiableEntityKey(Supplier<EntityKey> sek) {
return nullifiableEntityKeys != null
&& !nullifiableEntityKeys.isEmpty()
&& nullifiableEntityKeys.contains( sek.get() );
}
@Override
public void registerNullifiableEntityKey(EntityKey key) {
if ( nullifiableEntityKeys == null ) {
nullifiableEntityKeys = new HashSet<>();
}
nullifiableEntityKeys.add( key );
}
@Override
public boolean isNullifiableEntityKeysEmpty() {
return nullifiableEntityKeys == null
|| nullifiableEntityKeys.isEmpty();
}
@Override
public boolean containsDeletedUnloadedEntityKey(EntityKey ek) {
return deletedUnloadedEntityKeys != null
&& deletedUnloadedEntityKeys.contains( ek );
}
@Override
public void registerDeletedUnloadedEntityKey(EntityKey key) {
if ( deletedUnloadedEntityKeys == null ) {
deletedUnloadedEntityKeys = new HashSet<>();
}
deletedUnloadedEntityKeys.add( key );
}
@Override
public void removeDeletedUnloadedEntityKey(EntityKey key) {
assert deletedUnloadedEntityKeys != null;
deletedUnloadedEntityKeys.remove( key );
}
@Override
public boolean containsDeletedUnloadedEntityKeys() {
return deletedUnloadedEntityKeys != null && !deletedUnloadedEntityKeys.isEmpty();
}
@Override
public int getCollectionEntriesSize() {
return collectionEntries == null ? 0 : collectionEntries.size();
}
@Override
public CollectionEntry removeCollectionEntry(PersistentCollection<?> collection) {
if ( collectionEntries != null ) {
final int instanceId = collection.$$_hibernate_getInstanceId();
collection.$$_hibernate_setInstanceId( 0 );
return collectionEntries.remove( instanceId, collection );
}
else {
return null;
}
}
@Override
public void clearCollectionsByKey() {
if ( collectionsByKey != null ) {
// A valid alternative would be to set this to null, like we do on close.
// The difference being that in this case we expect the collection will be
// used again, so we bet that clear() might allow us to skip having to
// re-allocate the collection.
collectionsByKey.clear();
}
}
@Override
public PersistentCollection<?> addCollectionByKey(CollectionKey collectionKey, PersistentCollection<?> collection) {
if ( collectionsByKey == null ) {
collectionsByKey = mapOfSize( INIT_COLL_SIZE );
}
return collectionsByKey.put( collectionKey, collection );
}
@Override
public void removeCollectionByKey(CollectionKey collectionKey) {
if ( collectionsByKey != null ) {
collectionsByKey.remove( collectionKey );
}
}
private void cleanUpInsertedKeysAfterTransaction() {
if ( insertedKeysMap != null ) {
insertedKeysMap.clear();
}
}
private static | Serializer |
java | apache__dubbo | dubbo-spring-boot-project/dubbo-spring-boot-actuator/src/main/java/org/apache/dubbo/spring/boot/actuate/health/DubboHealthIndicator.java | {
"start": 1713,
"end": 7378
} | class ____ extends AbstractHealthIndicator {
@Autowired
private DubboHealthIndicatorProperties dubboHealthIndicatorProperties;
// @Autowired(required = false)
private Map<String, ProtocolConfig> protocolConfigs = Collections.emptyMap();
// @Autowired(required = false)
private Map<String, ProviderConfig> providerConfigs = Collections.emptyMap();
@Autowired
private ConfigManager configManager;
@Autowired
private ApplicationModel applicationModel;
@Override
protected void doHealthCheck(Health.Builder builder) throws Exception {
ExtensionLoader<StatusChecker> extensionLoader = applicationModel.getExtensionLoader(StatusChecker.class);
Map<String, String> statusCheckerNamesMap = resolveStatusCheckerNamesMap();
boolean hasError = false;
boolean hasUnknown = false;
// Up first
builder.up();
for (Map.Entry<String, String> entry : statusCheckerNamesMap.entrySet()) {
String statusCheckerName = entry.getKey();
String source = entry.getValue();
StatusChecker checker = extensionLoader.getExtension(statusCheckerName);
org.apache.dubbo.common.status.Status status = checker.check();
org.apache.dubbo.common.status.Status.Level level = status.getLevel();
if (!hasError && level.equals(org.apache.dubbo.common.status.Status.Level.ERROR)) {
hasError = true;
builder.down();
}
if (!hasError && !hasUnknown && level.equals(org.apache.dubbo.common.status.Status.Level.UNKNOWN)) {
hasUnknown = true;
builder.unknown();
}
Map<String, Object> detail = new LinkedHashMap<>();
detail.put("source", source);
detail.put("status", status);
builder.withDetail(statusCheckerName, detail);
}
}
/**
* Resolves the map of {@link StatusChecker}'s name and its' source.
*
* @return non-null {@link Map}
*/
protected Map<String, String> resolveStatusCheckerNamesMap() {
Map<String, String> statusCheckerNamesMap = new LinkedHashMap<>();
statusCheckerNamesMap.putAll(resolveStatusCheckerNamesMapFromDubboHealthIndicatorProperties());
statusCheckerNamesMap.putAll(resolveStatusCheckerNamesMapFromProtocolConfigs());
statusCheckerNamesMap.putAll(resolveStatusCheckerNamesMapFromProviderConfig());
return statusCheckerNamesMap;
}
private Map<String, String> resolveStatusCheckerNamesMapFromDubboHealthIndicatorProperties() {
DubboHealthIndicatorProperties.Status status = dubboHealthIndicatorProperties.getStatus();
Map<String, String> statusCheckerNamesMap = new LinkedHashMap<>();
for (String statusName : status.getDefaults()) {
statusCheckerNamesMap.put(statusName, DubboHealthIndicatorProperties.PREFIX + ".status.defaults");
}
for (String statusName : status.getExtras()) {
statusCheckerNamesMap.put(statusName, DubboHealthIndicatorProperties.PREFIX + ".status.extras");
}
return statusCheckerNamesMap;
}
private Map<String, String> resolveStatusCheckerNamesMapFromProtocolConfigs() {
if (protocolConfigs.isEmpty()) {
protocolConfigs = configManager.getConfigsMap(ProtocolConfig.class);
}
Map<String, String> statusCheckerNamesMap = new LinkedHashMap<>();
for (Map.Entry<String, ProtocolConfig> entry : protocolConfigs.entrySet()) {
String beanName = entry.getKey();
ProtocolConfig protocolConfig = entry.getValue();
Set<String> statusCheckerNames = getStatusCheckerNames(protocolConfig);
for (String statusCheckerName : statusCheckerNames) {
String source = buildSource(beanName, protocolConfig);
statusCheckerNamesMap.put(statusCheckerName, source);
}
}
return statusCheckerNamesMap;
}
private Map<String, String> resolveStatusCheckerNamesMapFromProviderConfig() {
if (providerConfigs.isEmpty()) {
providerConfigs = new LinkedHashMap<>();
for (ModuleModel moduleModel : applicationModel.getModuleModels()) {
providerConfigs.putAll(moduleModel.getConfigManager().getConfigsMap(ProviderConfig.class));
}
}
Map<String, String> statusCheckerNamesMap = new LinkedHashMap<>();
for (Map.Entry<String, ProviderConfig> entry : providerConfigs.entrySet()) {
String beanName = entry.getKey();
ProviderConfig providerConfig = entry.getValue();
Set<String> statusCheckerNames = getStatusCheckerNames(providerConfig);
for (String statusCheckerName : statusCheckerNames) {
String source = buildSource(beanName, providerConfig);
statusCheckerNamesMap.put(statusCheckerName, source);
}
}
return statusCheckerNamesMap;
}
private Set<String> getStatusCheckerNames(ProtocolConfig protocolConfig) {
String status = protocolConfig.getStatus();
return StringUtils.commaDelimitedListToSet(status);
}
private Set<String> getStatusCheckerNames(ProviderConfig providerConfig) {
String status = providerConfig.getStatus();
return StringUtils.commaDelimitedListToSet(status);
}
private String buildSource(String beanName, Object bean) {
return beanName + "@" + bean.getClass().getSimpleName() + ".getStatus()";
}
}
| DubboHealthIndicator |
java | elastic__elasticsearch | modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java | {
"start": 4082,
"end": 4328
} | class ____ extends ParentChildTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopy(super.nodePlugins(), CustomScriptPlugin.class);
}
public static | InnerHitsIT |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/BindableRuntimeHintsRegistrarTests.java | {
"start": 20323,
"end": 20527
} | class ____ {
@NestedConfigurationProperty
private @Nullable GenericObject<?> generic;
public @Nullable GenericObject<?> getGeneric() {
return this.generic;
}
}
public static final | WithGeneric |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/security/authenticator/LoginManager.java | {
"start": 4681,
"end": 4796
} | class ____
* chosen based on this mechanism.
* @param defaultLoginClass Default login | is |
java | spring-projects__spring-boot | module/spring-boot-pulsar/src/test/java/org/springframework/boot/pulsar/autoconfigure/PulsarAutoConfigurationTests.java | {
"start": 33133,
"end": 33422
} | class ____ {
@Bean
@Order(200)
ProducerInterceptor interceptorFoo() {
return mock(ProducerInterceptor.class);
}
@Bean
@Order(100)
ProducerInterceptor interceptorBar() {
return mock(ProducerInterceptor.class);
}
}
}
@Nested
| InterceptorTestConfiguration |
java | apache__logging-log4j2 | log4j-layout-template-json/src/main/java/org/apache/logging/log4j/layout/template/json/util/DummyRecycler.java | {
"start": 908,
"end": 1229
} | class ____<V> implements Recycler<V> {
private final Supplier<V> supplier;
public DummyRecycler(final Supplier<V> supplier) {
this.supplier = supplier;
}
@Override
public V acquire() {
return supplier.get();
}
@Override
public void release(final V value) {}
}
| DummyRecycler |
java | spring-projects__spring-framework | buildSrc/src/main/java/org/springframework/build/shadow/ShadowSource.java | {
"start": 5039,
"end": 5817
} | class ____ {
private final String pattern;
private final String pathPattern;
private final String destination;
private final String pathDestination;
Relocation(String pattern, String destination) {
this.pattern = pattern;
this.pathPattern = pattern.replace('.', '/');
this.destination = destination;
this.pathDestination = destination.replace('.', '/');
}
@Input
public String getPattern() {
return this.pattern;
}
@Input
public String getDestination() {
return this.destination;
}
String relocatePath(String path) {
return path.replace(this.pathPattern, this.pathDestination);
}
public String transformContent(String content) {
return content.replaceAll("\\b" + this.pattern, this.destination);
}
}
}
| Relocation |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/util/MimeTypes.java | {
"start": 909,
"end": 1265
} | class ____ resolves file extensions to MIME types.
*
* <p>There are various solutions built into Java that depend on extra resource and configuration
* files. They are designed to be composable and extensible, but also unfortunately tricky to
* control. This is meant to be a simple solution that may eventually be subsumed by a better one.
*/
public | that |
java | google__dagger | javatests/dagger/internal/codegen/IgnoreProvisionKeyWildcardsTest.java | {
"start": 17964,
"end": 18154
} | interface ____ {",
" fun mapExtends(): Map<Foo<out Bar>, String>",
" fun map(): Map<Foo<Bar>, String>",
"}",
"@Module",
" | MyComponent |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/rsocket/service/RSocketRequestValues.java | {
"start": 6507,
"end": 7396
} | class ____ {
private final List<Object> metadata = new ArrayList<>();
private final List<MimeType> mimeTypes = new ArrayList<>();
public void addMetadata(Object metadata) {
Assert.isTrue(this.metadata.size() == this.mimeTypes.size(), () -> "Invalid state: " + this);
this.metadata.add(metadata);
}
public void addMimeType(MimeType mimeType) {
Assert.isTrue(this.metadata.size() == (this.mimeTypes.size() + 1), () -> "Invalid state: " + this);
this.mimeTypes.add(mimeType);
}
public Map<Object, MimeType> toMap() {
Map<Object, MimeType> map = new LinkedHashMap<>(this.metadata.size());
for (int i = 0; i < this.metadata.size(); i++) {
map.put(this.metadata.get(i), this.mimeTypes.get(i));
}
return map;
}
@Override
public String toString() {
return "metadata=" + this.metadata + ", mimeTypes=" + this.mimeTypes;
}
}
}
| MetadataHelper |
java | square__retrofit | retrofit/android-test/src/androidTest/java/retrofit2/UriAndroidTest.java | {
"start": 1167,
"end": 2088
} | interface ____ {
@GET
Call<ResponseBody> method(@Url Uri url);
}
private Service service;
@Before
public void setUp() {
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server1.url("/"))
.build();
service = retrofit.create(Service.class);
}
@Test
public void getWithAndroidUriUrl() throws IOException, InterruptedException {
server1.enqueue(new MockResponse().setBody("Hi"));
service.method(Uri.parse("foo/bar/")).execute();
assertThat(server1.takeRequest().getRequestUrl()).isEqualTo(server1.url("foo/bar/"));
}
@Test
public void getWithAndroidUriUrlAbsolute() throws IOException, InterruptedException {
server2.enqueue(new MockResponse().setBody("Hi"));
HttpUrl url = server2.url("/");
service.method(Uri.parse(url.toString())).execute();
assertThat(server2.takeRequest().getRequestUrl()).isEqualTo(url);
}
}
| Service |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/spi/EntityEntry.java | {
"start": 866,
"end": 3286
} | interface ____ {
LockMode getLockMode();
void setLockMode(LockMode lockMode);
Status getStatus();
void setStatus(Status status);
Object getId();
Object[] getLoadedState();
Object getLoadedValue(String propertyName);
void overwriteLoadedStateCollectionValue(String propertyName, PersistentCollection<?> collection);
Object[] getDeletedState();
void setDeletedState(Object[] deletedState);
boolean isExistsInDatabase();
Object getVersion();
void postInsert(Object version);
EntityPersister getPersister();
/**
* Get the {@link EntityKey} for this entry.
*
* @return the {@link EntityKey}
* @throws IllegalStateException if {@link #getId()} is null
*/
EntityKey getEntityKey();
String getEntityName();
boolean isBeingReplicated();
Object getRowId();
void postLoad(Object entity);
/**
* Handle updating the internal state of the entry after actually performing
* the database update. Specifically, we update the snapshot information and
* escalate the lock mode.
*
* @param entity The entity instance
* @param updatedState The state calculated after the update (becomes the
* new {@link #getLoadedState() loaded state}.
* @param nextVersion The new version.
*/
void postUpdate(Object entity, Object[] updatedState, Object nextVersion);
/**
* After actually deleting a row, record the fact that the instance no longer
* exists in the database.
*/
void postDelete();
/**
* After actually inserting a row, record the fact that the instance exists
* in the database (needed for identity column key generation).
*/
void postInsert(Object[] insertedState);
boolean isNullifiable(boolean earlyInsert, SharedSessionContractImplementor session);
/**
* Returns {@code true} if the entity can possibly be dirty. This can only
* be the case if it is in a modifiable state (not read-only nor deleted)
* and it either has mutable properties or field-interception is not telling
* us that it is dirty.
*
* @param entity The entity to test
*
* @return {@code true} indicates that the entity could possibly be dirty
* and that the dirty-check should happen;
* {@code false} indicates there is no way the entity can be dirty
*/
boolean requiresDirtyCheck(Object entity);
/**
* Can the entity be modified?
* <p>
* The entity is modifiable if all the following are true:
* <ul>
* <li>the entity | EntityEntry |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/gwt/CustomFieldSerializerTest.java | {
"start": 6755,
"end": 7606
} | interface ____ {
Builder setPackage(String x);
Builder setDefault(boolean x);
ValueTypeWithBuilderAndGetters build();
}
}
@Test
public void testCustomFieldSerializerWithBuilderAndGetters() throws SerializationException {
AutoValue_CustomFieldSerializerTest_ValueTypeWithBuilderAndGetters instance =
(AutoValue_CustomFieldSerializerTest_ValueTypeWithBuilderAndGetters)
ValueTypeWithBuilderAndGetters.builder().setPackage("s").setDefault(false).build();
AutoValue_CustomFieldSerializerTest_ValueTypeWithBuilderAndGetters_CustomFieldSerializer
.serialize(streamWriter, instance);
mock.verify(
() -> {
streamWriter.writeString("s");
streamWriter.writeBoolean(false);
});
}
@AutoValue
@GwtCompatible(serializable = true)
abstract static | Builder |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/TestSerializable.java | {
"start": 384,
"end": 784
} | class ____ {
private long id;
private Serializable value;
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Serializable getValue() {
return value;
}
public void setValue(Serializable value) {
this.value = value;
}
}
}
| VO |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/annotation/TypeMappedAnnotations.java | {
"start": 12163,
"end": 15058
} | class ____<A extends Annotation>
implements AnnotationsProcessor<Object, MergedAnnotation<A>> {
private final Object requiredType;
private final @Nullable Predicate<? super MergedAnnotation<A>> predicate;
private final MergedAnnotationSelector<A> selector;
private @Nullable MergedAnnotation<A> result;
MergedAnnotationFinder(Object requiredType, @Nullable Predicate<? super MergedAnnotation<A>> predicate,
@Nullable MergedAnnotationSelector<A> selector) {
this.requiredType = requiredType;
this.predicate = predicate;
this.selector = (selector != null ? selector : MergedAnnotationSelectors.nearest());
}
@Override
public @Nullable MergedAnnotation<A> doWithAggregate(Object context, int aggregateIndex) {
return this.result;
}
@Override
public @Nullable MergedAnnotation<A> doWithAnnotations(Object type, int aggregateIndex,
@Nullable Object source, @Nullable Annotation[] annotations) {
for (Annotation annotation : annotations) {
if (annotation != null && !annotationFilter.matches(annotation)) {
MergedAnnotation<A> result = process(type, aggregateIndex, source, annotation);
if (result != null) {
return result;
}
}
}
return null;
}
private @Nullable MergedAnnotation<A> process(
Object type, int aggregateIndex, @Nullable Object source, Annotation annotation) {
Annotation[] repeatedAnnotations = repeatableContainers.findRepeatedAnnotations(annotation);
if (repeatedAnnotations != null) {
MergedAnnotation<A> result = doWithAnnotations(type, aggregateIndex, source, repeatedAnnotations);
if (result != null) {
return result;
}
}
AnnotationTypeMappings mappings = AnnotationTypeMappings.forAnnotationType(
annotation.annotationType(), repeatableContainers, annotationFilter);
for (int i = 0; i < mappings.size(); i++) {
AnnotationTypeMapping mapping = mappings.get(i);
if (isMappingForType(mapping, annotationFilter, this.requiredType)) {
MergedAnnotation<A> candidate = TypeMappedAnnotation.createIfPossible(
mapping, source, annotation, aggregateIndex, IntrospectionFailureLogger.INFO);
if (candidate != null && (this.predicate == null || this.predicate.test(candidate))) {
if (this.selector.isBestCandidate(candidate)) {
return candidate;
}
updateLastResult(candidate);
}
}
}
return null;
}
private void updateLastResult(MergedAnnotation<A> candidate) {
MergedAnnotation<A> lastResult = this.result;
this.result = (lastResult != null ? this.selector.select(lastResult, candidate) : candidate);
}
@Override
public @Nullable MergedAnnotation<A> finish(@Nullable MergedAnnotation<A> result) {
return (result != null ? result : this.result);
}
}
/**
* {@link AnnotationsProcessor} that collects {@link Aggregate} instances.
*/
private | MergedAnnotationFinder |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/vertx/CreateVertxTest.java | {
"start": 703,
"end": 1746
} | class ____ extends VertxTestBase {
@Test
public void testCreateSimpleVertx() {
Vertx vertx = vertx();
assertNotNull(vertx);
}
@Test
public void testCreateVertxWithOptions() {
VertxOptions options = new VertxOptions();
Vertx vertx = vertx(options);
assertNotNull(vertx);
assertFalse(vertx.isClustered());
}
@Test
public void testCreateClusteredVertxAsync() {
VertxOptions options = new VertxOptions();
clusteredVertx(options)
.compose(v -> {
assertTrue(v.isClustered());
return v.close();
}).await();
}
@Test
public void testCreateClusteredVertxAsyncDetectJoinFailure() {
ClusterManager clusterManager = new FakeClusterManager(){
@Override
public void join(Completable<Void> promise) {
promise.fail("joinfailure");
}
};
try {
clusteredVertx(new VertxOptions(), clusterManager).await();
} catch (Throwable e) {
assertEquals("joinfailure", e.getMessage());
return;
}
fail();
}
}
| CreateVertxTest |
java | spring-projects__spring-boot | module/spring-boot-graphql/src/main/java/org/springframework/boot/graphql/autoconfigure/observation/GraphQlObservationAutoConfiguration.java | {
"start": 2111,
"end": 2746
} | class ____ {
@Bean
@ConditionalOnMissingBean
GraphQlObservationInstrumentation graphQlObservationInstrumentation(ObservationRegistry observationRegistry,
ObjectProvider<ExecutionRequestObservationConvention> executionConvention,
ObjectProvider<DataFetcherObservationConvention> dataFetcherConvention,
ObjectProvider<DataLoaderObservationConvention> dataLoaderObservationConvention) {
return new GraphQlObservationInstrumentation(observationRegistry, executionConvention.getIfAvailable(),
dataFetcherConvention.getIfAvailable(), dataLoaderObservationConvention.getIfAvailable());
}
}
| GraphQlObservationAutoConfiguration |
java | apache__camel | components/camel-spring-parent/camel-spring-ai/camel-spring-ai-chat/src/main/java/org/apache/camel/component/springai/chat/SpringAiChatConstants.java | {
"start": 965,
"end": 3758
} | class ____ {
@Metadata(description = "The response from the chat model", javaType = "String")
public static final String CHAT_RESPONSE = "CamelSpringAiChatResponse";
@Metadata(description = "The number of input tokens used", javaType = "Integer")
public static final String INPUT_TOKEN_COUNT = "CamelSpringAiInputTokenCount";
@Metadata(description = "The number of output tokens used", javaType = "Integer")
public static final String OUTPUT_TOKEN_COUNT = "CamelSpringAiOutputTokenCount";
@Metadata(description = "The total number of tokens used", javaType = "Integer")
public static final String TOTAL_TOKEN_COUNT = "CamelSpringAiTotalTokenCount";
@Metadata(description = "The prompt template with placeholders for variable substitution", javaType = "String")
public static final String PROMPT_TEMPLATE = "CamelSpringAiChatPromptTemplate";
@Metadata(description = "Augmented data for RAG as List<org.springframework.ai.document.Document>",
javaType = "java.util.List<org.springframework.ai.document.Document>")
public static final String AUGMENTED_DATA = "CamelSpringAiChatAugmentedData";
@Metadata(description = "System message for the conversation", javaType = "String")
public static final String SYSTEM_MESSAGE = "CamelSpringAiChatSystemMessage";
@Metadata(description = "Temperature parameter for response randomness (0.0-2.0)", javaType = "Double")
public static final String TEMPERATURE = "CamelSpringAiChatTemperature";
@Metadata(description = "Maximum tokens in the response", javaType = "Integer")
public static final String MAX_TOKENS = "CamelSpringAiChatMaxTokens";
@Metadata(description = "Top P parameter for nucleus sampling", javaType = "Double")
public static final String TOP_P = "CamelSpringAiChatTopP";
@Metadata(description = "Top K parameter for sampling", javaType = "Integer")
public static final String TOP_K = "CamelSpringAiChatTopK";
@Metadata(description = "User message text for multimodal requests", javaType = "String")
public static final String USER_MESSAGE = "CamelSpringAiChatUserMessage";
@Metadata(description = "Media data for multimodal requests (image or audio)", javaType = "byte[]")
public static final String MEDIA_DATA = "CamelSpringAiChatMediaData";
@Metadata(description = "Media type (MIME type) for multimodal requests (e.g., image/png, audio/wav)", javaType = "String")
public static final String MEDIA_TYPE = "CamelSpringAiChatMediaType";
@Metadata(description = "The output format type for structured output conversion (BEAN, MAP, LIST)", javaType = "String")
public static final String OUTPUT_FORMAT = "CamelSpringAiChatOutputFormat";
@Metadata(description = "The Java | SpringAiChatConstants |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/main/java/org/springframework/security/saml2/provider/service/registration/RelyingPartyRegistration.java | {
"start": 32330,
"end": 42273
} | class ____ {
private String registrationId;
private String entityId = "{baseUrl}/saml2/service-provider-metadata/{registrationId}";
private Collection<Saml2X509Credential> signingX509Credentials = new LinkedHashSet<>();
private Collection<Saml2X509Credential> decryptionX509Credentials = new LinkedHashSet<>();
private String assertionConsumerServiceLocation = "{baseUrl}/login/saml2/sso/{registrationId}";
private Saml2MessageBinding assertionConsumerServiceBinding = Saml2MessageBinding.POST;
private String singleLogoutServiceLocation;
private String singleLogoutServiceResponseLocation;
private Collection<Saml2MessageBinding> singleLogoutServiceBindings = new LinkedHashSet<>();
private String nameIdFormat = null;
private boolean authnRequestsSigned = false;
private AssertingPartyMetadata.Builder<?> assertingPartyMetadataBuilder;
protected Builder(String registrationId, AssertingPartyMetadata.Builder<?> assertingPartyMetadataBuilder) {
this.registrationId = registrationId;
this.assertingPartyMetadataBuilder = assertingPartyMetadataBuilder;
}
/**
* Sets the {@code registrationId} template. Often be used in URL paths
* @param id registrationId for this object, should be unique
* @return this object
*/
public Builder registrationId(String id) {
this.registrationId = id;
return this;
}
/**
* Set the relying party's <a href=
* "https://www.oasis-open.org/committees/download.php/51890/SAML%20MD%20simplified%20overview.pdf#2.9%20EntityDescriptor">EntityID</a>.
* Equivalent to the value found in the relying party's <EntityDescriptor
* EntityID="..."/>
*
* This value may contain a number of placeholders. They are {@code baseUrl},
* {@code registrationId}, {@code baseScheme}, {@code baseHost}, and
* {@code basePort}.
* @param entityId the relying party's EntityID
* @return the {@link Builder} for further configuration
* @since 5.4
*/
public Builder entityId(String entityId) {
this.entityId = entityId;
return this;
}
/**
* Apply this {@link Consumer} to the {@link Collection} of
* {@link Saml2X509Credential}s for the purposes of modifying the
* {@link Collection}
* @param credentialsConsumer - the {@link Consumer} for modifying the
* {@link Collection}
* @return the {@link Builder} for further configuration
* @since 5.4
*/
public Builder signingX509Credentials(Consumer<Collection<Saml2X509Credential>> credentialsConsumer) {
credentialsConsumer.accept(this.signingX509Credentials);
return this;
}
/**
* Apply this {@link Consumer} to the {@link Collection} of
* {@link Saml2X509Credential}s for the purposes of modifying the
* {@link Collection}
* @param credentialsConsumer - the {@link Consumer} for modifying the
* {@link Collection}
* @return the {@link Builder} for further configuration
* @since 5.4
*/
public Builder decryptionX509Credentials(Consumer<Collection<Saml2X509Credential>> credentialsConsumer) {
credentialsConsumer.accept(this.decryptionX509Credentials);
return this;
}
/**
* Set the <a href=
* "https://www.oasis-open.org/committees/download.php/51890/SAML%20MD%20simplified%20overview.pdf#2.3%20AttributeConsumingService">
* AssertionConsumerService</a> Location.
*
* <p>
* Equivalent to the value found in <AssertionConsumerService
* Location="..."/> in the relying party's <SPSSODescriptor>
*
* <p>
* This value may contain a number of placeholders. They are {@code baseUrl},
* {@code registrationId}, {@code baseScheme}, {@code baseHost}, and
* {@code basePort}.
* @param assertionConsumerServiceLocation the AssertionConsumerService location
* @return the {@link Builder} for further configuration
* @since 5.4
*/
public Builder assertionConsumerServiceLocation(String assertionConsumerServiceLocation) {
this.assertionConsumerServiceLocation = assertionConsumerServiceLocation;
return this;
}
/**
* Set the <a href=
* "https://www.oasis-open.org/committees/download.php/51890/SAML%20MD%20simplified%20overview.pdf#2.3%20AttributeConsumingService">
* AssertionConsumerService</a> Binding.
*
* <p>
* Equivalent to the value found in <AssertionConsumerService
* Binding="..."/> in the relying party's <SPSSODescriptor>
* @param assertionConsumerServiceBinding the AssertionConsumerService binding
* @return the {@link Builder} for further configuration
* @since 5.4
*/
public Builder assertionConsumerServiceBinding(Saml2MessageBinding assertionConsumerServiceBinding) {
this.assertionConsumerServiceBinding = assertionConsumerServiceBinding;
return this;
}
/**
* Set the <a href=
* "https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf#page=7">SingleLogoutService
* Binding</a>
*
* <p>
* Equivalent to the value found in <SingleLogoutService Binding="..."/> in
* the relying party's <SPSSODescriptor>.
* @param singleLogoutServiceBinding the SingleLogoutService Binding
* @return the {@link Builder} for further configuration
* @since 5.6
*/
public Builder singleLogoutServiceBinding(Saml2MessageBinding singleLogoutServiceBinding) {
return this.singleLogoutServiceBindings((saml2MessageBindings) -> {
saml2MessageBindings.clear();
saml2MessageBindings.add(singleLogoutServiceBinding);
});
}
/**
* Apply this {@link Consumer} to the {@link Collection} of
* {@link Saml2MessageBinding}s for the purposes of modifying the <a href=
* "https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf#page=7">SingleLogoutService
* Binding</a> {@link Collection}.
*
* <p>
* Equivalent to the value found in <SingleLogoutService Binding="..."/> in
* the relying party's <SPSSODescriptor>.
* @param bindingsConsumer - the {@link Consumer} for modifying the
* {@link Collection}
* @return the {@link Builder} for further configuration
* @since 5.8
*/
public Builder singleLogoutServiceBindings(Consumer<Collection<Saml2MessageBinding>> bindingsConsumer) {
bindingsConsumer.accept(this.singleLogoutServiceBindings);
return this;
}
/**
* Set the <a href=
* "https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf#page=7">SingleLogoutService
* Location</a>
*
* <p>
* Equivalent to the value found in <SingleLogoutService Location="..."/> in
* the relying party's <SPSSODescriptor>.
* @param singleLogoutServiceLocation the SingleLogoutService Location
* @return the {@link Builder} for further configuration
* @since 5.6
*/
public Builder singleLogoutServiceLocation(String singleLogoutServiceLocation) {
this.singleLogoutServiceLocation = singleLogoutServiceLocation;
return this;
}
/**
* Set the <a href=
* "https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf#page=7">SingleLogoutService
* Response Location</a>
*
* <p>
* Equivalent to the value found in <SingleLogoutService
* ResponseLocation="..."/> in the relying party's <SPSSODescriptor>.
* @param singleLogoutServiceResponseLocation the SingleLogoutService Response
* Location
* @return the {@link Builder} for further configuration
* @since 5.6
*/
public Builder singleLogoutServiceResponseLocation(String singleLogoutServiceResponseLocation) {
this.singleLogoutServiceResponseLocation = singleLogoutServiceResponseLocation;
return this;
}
/**
* Set the NameID format
* @param nameIdFormat the given NameID format
* @return the {@link Builder} for further configuration
* @since 5.7
*/
public Builder nameIdFormat(String nameIdFormat) {
this.nameIdFormat = nameIdFormat;
return this;
}
/**
* Set the <a href=
* "https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf#page=18">
* AuthnRequestsSigned</a> setting. If {@code true}, the relying party will sign
* all AuthnRequests, 301 asserting party preference.
*
* <p>
* Note that Spring Security will sign the request if either
* {@link #isAuthnRequestsSigned()} is {@code true} or
* {@link AssertingPartyDetails#getWantAuthnRequestsSigned()} is {@code true}.
* @return the {@link Builder} for further configuration
* @since 6.1
*/
public Builder authnRequestsSigned(Boolean authnRequestsSigned) {
this.authnRequestsSigned = authnRequestsSigned;
return this;
}
/**
* Apply this {@link Consumer} to further configure the Asserting Party metadata
* @param assertingPartyMetadata The {@link Consumer} to apply
* @return the {@link Builder} for further configuration
* @since 6.4
*/
public Builder assertingPartyMetadata(Consumer<AssertingPartyMetadata.Builder<?>> assertingPartyMetadata) {
assertingPartyMetadata.accept(this.assertingPartyMetadataBuilder);
return this;
}
/**
* Constructs a RelyingPartyRegistration object based on the builder
* configurations
* @return a RelyingPartyRegistration instance
*/
public RelyingPartyRegistration build() {
if (this.singleLogoutServiceResponseLocation == null) {
this.singleLogoutServiceResponseLocation = this.singleLogoutServiceLocation;
}
if (this.singleLogoutServiceBindings.isEmpty()) {
this.singleLogoutServiceBindings.add(Saml2MessageBinding.POST);
}
AssertingPartyMetadata party = this.assertingPartyMetadataBuilder.build();
return new RelyingPartyRegistration(this.registrationId, this.entityId,
this.assertionConsumerServiceLocation, this.assertionConsumerServiceBinding,
this.singleLogoutServiceLocation, this.singleLogoutServiceResponseLocation,
this.singleLogoutServiceBindings, party, this.nameIdFormat, this.authnRequestsSigned,
this.decryptionX509Credentials, this.signingX509Credentials);
}
}
}
| Builder |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RenderSearchApplicationQueryAction.java | {
"start": 802,
"end": 1156
} | class ____ {
public static final String NAME = "cluster:admin/xpack/application/search_application/render_query";
public static final ActionType<RenderSearchApplicationQueryAction.Response> INSTANCE = new ActionType<>(NAME);
private RenderSearchApplicationQueryAction() {/* no instances */}
public static | RenderSearchApplicationQueryAction |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/bean/SimpleLanguageBeanBodyParenthesisTest.java | {
"start": 989,
"end": 2005
} | class ____ extends ContextTestSupport {
@Test
public void testNo() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:other").expectedMessageCount(1);
template.sendBody("direct:single", "Camel");
assertMockEndpointsSatisfied();
}
@Test
public void testYes() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(1);
getMockEndpoint("mock:other").expectedMessageCount(0);
template.sendBody("direct:single", "Hello(World) how are you");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:single").choice().when().simple("${body.contains(\")\")}").to("mock:result").otherwise()
.to("mock:other");
}
};
}
}
| SimpleLanguageBeanBodyParenthesisTest |
java | spring-projects__spring-boot | module/spring-boot-session/src/testFixtures/java/org/springframework/boot/session/autoconfigure/AbstractSessionReactiveAutoConfigurationTests.java | {
"start": 1861,
"end": 1999
} | class ____ Spring Session auto-configuration tests when the backing store is
* reactive.
*
* @author Andy Wilkinson
*/
public abstract | for |
java | apache__camel | components/camel-debezium/camel-debezium-db2/src/generated/java/org/apache/camel/component/debezium/db2/configuration/Db2ConnectorEmbeddedDebeziumConfiguration.java | {
"start": 42631,
"end": 54346
} | class ____ should be used to store and
* recover database schema changes. The configuration properties for the
* history are prefixed with the 'schema.history.internal.' string.
*/
public void setSchemaHistoryInternal(String schemaHistoryInternal) {
this.schemaHistoryInternal = schemaHistoryInternal;
}
public String getSchemaHistoryInternal() {
return schemaHistoryInternal;
}
/**
* Regular expressions matching columns to exclude from change events
*/
public void setColumnExcludeList(String columnExcludeList) {
this.columnExcludeList = columnExcludeList;
}
public String getColumnExcludeList() {
return columnExcludeList;
}
/**
* Resolvable hostname or IP address of the database server.
*/
public void setDatabaseHostname(String databaseHostname) {
this.databaseHostname = databaseHostname;
}
public String getDatabaseHostname() {
return databaseHostname;
}
/**
* Specify how schema names should be adjusted for compatibility with the
* message converter used by the connector, including: 'avro' replaces the
* characters that cannot be used in the Avro type name with underscore;
* 'avro_unicode' replaces the underscore or characters that cannot be used
* in the Avro type name with corresponding unicode like _uxxxx. Note: _ is
* an escape sequence like backslash in Java;'none' does not apply any
* adjustment (default)
*/
public void setSchemaNameAdjustmentMode(String schemaNameAdjustmentMode) {
this.schemaNameAdjustmentMode = schemaNameAdjustmentMode;
}
public String getSchemaNameAdjustmentMode() {
return schemaNameAdjustmentMode;
}
/**
* The tables for which changes are to be captured
*/
public void setTableIncludeList(String tableIncludeList) {
this.tableIncludeList = tableIncludeList;
}
public String getTableIncludeList() {
return tableIncludeList;
}
/**
* The maximum time in milliseconds to wait for connection validation to
* complete. Defaults to 60 seconds.
*/
public void setConnectionValidationTimeoutMs(
long connectionValidationTimeoutMs) {
this.connectionValidationTimeoutMs = connectionValidationTimeoutMs;
}
public long getConnectionValidationTimeoutMs() {
return connectionValidationTimeoutMs;
}
/**
* Informs connector which Db2 implementation platform it is connected to.
* The default is 'LUW', which means Windows, UNIX, Linux. Using a value of
* 'Z' ensures that the Db2 for z/OS specific SQL statements are used.
*/
public void setDb2Platform(String db2Platform) {
this.db2Platform = db2Platform;
}
public String getDb2Platform() {
return db2Platform;
}
@Override
protected Configuration createConnectorConfiguration() {
final Configuration.Builder configBuilder = Configuration.create();
addPropertyIfNotNull(configBuilder, "message.key.columns", messageKeyColumns);
addPropertyIfNotNull(configBuilder, "transaction.metadata.factory", transactionMetadataFactory);
addPropertyIfNotNull(configBuilder, "streaming.delay.ms", streamingDelayMs);
addPropertyIfNotNull(configBuilder, "custom.metric.tags", customMetricTags);
addPropertyIfNotNull(configBuilder, "openlineage.integration.job.namespace", openlineageIntegrationJobNamespace);
addPropertyIfNotNull(configBuilder, "query.fetch.size", queryFetchSize);
addPropertyIfNotNull(configBuilder, "signal.enabled.channels", signalEnabledChannels);
addPropertyIfNotNull(configBuilder, "include.schema.changes", includeSchemaChanges);
addPropertyIfNotNull(configBuilder, "poll.interval.ms", pollIntervalMs);
addPropertyIfNotNull(configBuilder, "guardrail.collections.max", guardrailCollectionsMax);
addPropertyIfNotNull(configBuilder, "signal.data.collection", signalDataCollection);
addPropertyIfNotNull(configBuilder, "converters", converters);
addPropertyIfNotNull(configBuilder, "heartbeat.topics.prefix", heartbeatTopicsPrefix);
addPropertyIfNotNull(configBuilder, "snapshot.fetch.size", snapshotFetchSize);
addPropertyIfNotNull(configBuilder, "openlineage.integration.job.tags", openlineageIntegrationJobTags);
addPropertyIfNotNull(configBuilder, "snapshot.lock.timeout.ms", snapshotLockTimeoutMs);
addPropertyIfNotNull(configBuilder, "cdc.change.tables.schema", cdcChangeTablesSchema);
addPropertyIfNotNull(configBuilder, "database.user", databaseUser);
addPropertyIfNotNull(configBuilder, "database.dbname", databaseDbname);
addPropertyIfNotNull(configBuilder, "datatype.propagate.source.type", datatypePropagateSourceType);
addPropertyIfNotNull(configBuilder, "snapshot.tables.order.by.row.count", snapshotTablesOrderByRowCount);
addPropertyIfNotNull(configBuilder, "incremental.snapshot.watermarking.strategy", incrementalSnapshotWatermarkingStrategy);
addPropertyIfNotNull(configBuilder, "snapshot.select.statement.overrides", snapshotSelectStatementOverrides);
addPropertyIfNotNull(configBuilder, "heartbeat.interval.ms", heartbeatIntervalMs);
addPropertyIfNotNull(configBuilder, "snapshot.mode.configuration.based.snapshot.on.schema.error", snapshotModeConfigurationBasedSnapshotOnSchemaError);
addPropertyIfNotNull(configBuilder, "schema.history.internal.skip.unparseable.ddl", schemaHistoryInternalSkipUnparseableDdl);
addPropertyIfNotNull(configBuilder, "column.include.list", columnIncludeList);
addPropertyIfNotNull(configBuilder, "column.propagate.source.type", columnPropagateSourceType);
addPropertyIfNotNull(configBuilder, "errors.max.retries", errorsMaxRetries);
addPropertyIfNotNull(configBuilder, "table.exclude.list", tableExcludeList);
addPropertyIfNotNull(configBuilder, "database.password", databasePassword);
addPropertyIfNotNull(configBuilder, "max.batch.size", maxBatchSize);
addPropertyIfNotNull(configBuilder, "skipped.operations", skippedOperations);
addPropertyIfNotNull(configBuilder, "openlineage.integration.job.description", openlineageIntegrationJobDescription);
addPropertyIfNotNull(configBuilder, "topic.naming.strategy", topicNamingStrategy);
addPropertyIfNotNull(configBuilder, "snapshot.mode", snapshotMode);
addPropertyIfNotNull(configBuilder, "snapshot.mode.configuration.based.snapshot.data", snapshotModeConfigurationBasedSnapshotData);
addPropertyIfNotNull(configBuilder, "extended.headers.enabled", extendedHeadersEnabled);
addPropertyIfNotNull(configBuilder, "max.queue.size", maxQueueSize);
addPropertyIfNotNull(configBuilder, "guardrail.collections.limit.action", guardrailCollectionsLimitAction);
addPropertyIfNotNull(configBuilder, "incremental.snapshot.chunk.size", incrementalSnapshotChunkSize);
addPropertyIfNotNull(configBuilder, "openlineage.integration.job.owners", openlineageIntegrationJobOwners);
addPropertyIfNotNull(configBuilder, "openlineage.integration.config.file.path", openlineageIntegrationConfigFilePath);
addPropertyIfNotNull(configBuilder, "retriable.restart.connector.wait.ms", retriableRestartConnectorWaitMs);
addPropertyIfNotNull(configBuilder, "snapshot.delay.ms", snapshotDelayMs);
addPropertyIfNotNull(configBuilder, "executor.shutdown.timeout.ms", executorShutdownTimeoutMs);
addPropertyIfNotNull(configBuilder, "provide.transaction.metadata", provideTransactionMetadata);
addPropertyIfNotNull(configBuilder, "schema.history.internal.store.only.captured.tables.ddl", schemaHistoryInternalStoreOnlyCapturedTablesDdl);
addPropertyIfNotNull(configBuilder, "schema.history.internal.store.only.captured.databases.ddl", schemaHistoryInternalStoreOnlyCapturedDatabasesDdl);
addPropertyIfNotNull(configBuilder, "snapshot.mode.configuration.based.snapshot.on.data.error", snapshotModeConfigurationBasedSnapshotOnDataError);
addPropertyIfNotNull(configBuilder, "schema.history.internal.file.filename", schemaHistoryInternalFileFilename);
addPropertyIfNotNull(configBuilder, "tombstones.on.delete", tombstonesOnDelete);
addPropertyIfNotNull(configBuilder, "topic.prefix", topicPrefix);
addPropertyIfNotNull(configBuilder, "decimal.handling.mode", decimalHandlingMode);
addPropertyIfNotNull(configBuilder, "sourceinfo.struct.maker", sourceinfoStructMaker);
addPropertyIfNotNull(configBuilder, "openlineage.integration.dataset.kafka.bootstrap.servers", openlineageIntegrationDatasetKafkaBootstrapServers);
addPropertyIfNotNull(configBuilder, "cdc.control.schema", cdcControlSchema);
addPropertyIfNotNull(configBuilder, "table.ignore.builtin", tableIgnoreBuiltin);
addPropertyIfNotNull(configBuilder, "openlineage.integration.enabled", openlineageIntegrationEnabled);
addPropertyIfNotNull(configBuilder, "snapshot.include.collection.list", snapshotIncludeCollectionList);
addPropertyIfNotNull(configBuilder, "snapshot.mode.configuration.based.start.stream", snapshotModeConfigurationBasedStartStream);
addPropertyIfNotNull(configBuilder, "max.queue.size.in.bytes", maxQueueSizeInBytes);
addPropertyIfNotNull(configBuilder, "snapshot.mode.configuration.based.snapshot.schema", snapshotModeConfigurationBasedSnapshotSchema);
addPropertyIfNotNull(configBuilder, "time.precision.mode", timePrecisionMode);
addPropertyIfNotNull(configBuilder, "signal.poll.interval.ms", signalPollIntervalMs);
addPropertyIfNotNull(configBuilder, "post.processors", postProcessors);
addPropertyIfNotNull(configBuilder, "notification.enabled.channels", notificationEnabledChannels);
addPropertyIfNotNull(configBuilder, "event.processing.failure.handling.mode", eventProcessingFailureHandlingMode);
addPropertyIfNotNull(configBuilder, "database.port", databasePort);
addPropertyIfNotNull(configBuilder, "notification.sink.topic.name", notificationSinkTopicName);
addPropertyIfNotNull(configBuilder, "snapshot.mode.custom.name", snapshotModeCustomName);
addPropertyIfNotNull(configBuilder, "schema.history.internal", schemaHistoryInternal);
addPropertyIfNotNull(configBuilder, "column.exclude.list", columnExcludeList);
addPropertyIfNotNull(configBuilder, "database.hostname", databaseHostname);
addPropertyIfNotNull(configBuilder, "schema.name.adjustment.mode", schemaNameAdjustmentMode);
addPropertyIfNotNull(configBuilder, "table.include.list", tableIncludeList);
addPropertyIfNotNull(configBuilder, "connection.validation.timeout.ms", connectionValidationTimeoutMs);
addPropertyIfNotNull(configBuilder, "db2.platform", db2Platform);
return configBuilder.build();
}
@Override
protected Class configureConnectorClass() {
return Db2Connector.class;
}
@Override
protected ConfigurationValidation validateConnectorConfiguration() {
if (isFieldValueNotSet(databasePassword)) {
return ConfigurationValidation.notValid("Required field 'databasePassword' must be set.");
}
if (isFieldValueNotSet(topicPrefix)) {
return ConfigurationValidation.notValid("Required field 'topicPrefix' must be set.");
}
return ConfigurationValidation.valid();
}
@Override
public String getConnectorDatabaseType() {
return "db2";
}
} | that |
java | netty__netty | common/src/main/java/io/netty/util/concurrent/FailedFuture.java | {
"start": 971,
"end": 1867
} | class ____<V> extends CompleteFuture<V> {
private final Throwable cause;
/**
* Creates a new instance.
*
* @param executor the {@link EventExecutor} associated with this future
* @param cause the cause of failure
*/
public FailedFuture(EventExecutor executor, Throwable cause) {
super(executor);
this.cause = ObjectUtil.checkNotNull(cause, "cause");
}
@Override
public Throwable cause() {
return cause;
}
@Override
public boolean isSuccess() {
return false;
}
@Override
public Future<V> sync() {
PlatformDependent.throwException(cause);
return this;
}
@Override
public Future<V> syncUninterruptibly() {
PlatformDependent.throwException(cause);
return this;
}
@Override
public V getNow() {
return null;
}
}
| FailedFuture |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/StructuredType.java | {
"start": 3334,
"end": 3775
} | class ____ incomplete. We might add new features such
* as method declarations in the future. Also ordering is not supported yet.
*
* <p>The serialized string representation is {@code `cat`.`db`.`t`} where {@code cat} is the
* catalog name, {@code db} is the database name, and {@code t} the user-defined type name.
*
* <h1>Inline Structured Types</h1>
*
* <p>Types that are unregistered (i.e. declared inline) and are identified by a | is |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/Mock.java | {
"start": 2611,
"end": 2859
} | class ____ a corresponding hook.
* </p>
*
* @see Mockito#mock(Class)
* @see Spy
* @see InjectMocks
* @see MockitoAnnotations#openMocks(Object)
* @see MockitoJUnitRunner
*/
@Target({FIELD, PARAMETER})
@Retention(RUNTIME)
@Documented
public @ | with |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/TemplateNode.java | {
"start": 290,
"end": 2395
} | interface ____ {
/**
*
* @param context
* @return the result node
*/
CompletionStage<ResultNode> resolve(ResolutionContext context);
/**
*
* @return a list of expressions
*/
default List<Expression> getExpressions() {
return Collections.emptyList();
}
/**
* Returns the parameter declarations defined in this template node.
*
* @return a list of param declarations
*/
default List<ParameterDeclaration> getParameterDeclarations() {
return Collections.emptyList();
}
/**
*
* @return the origin of the node
*/
Origin getOrigin();
/**
* Constant means a static text or a literal output expression.
*
* @return {@code true} if the node represents a constant
* @see TextNode
* @see Expression#isLiteral()
*/
default boolean isConstant() {
return false;
}
/**
*
* @return {@code true} if the node represents a section
* @see SectionNode
*/
default boolean isSection() {
return kind() == Kind.SECTION;
}
/**
*
* @return {@code true} if the node represents a text
* @see TextNode
*/
default boolean isText() {
return kind() == Kind.TEXT;
}
/**
*
* @return{@code true} if the node represents an output expression
* @see ExpressionNode
*/
default boolean isExpression() {
return kind() == Kind.EXPRESSION;
}
/**
* Returns the kind of this node.
* <p>
* Note that comments and line separators are never preserved in the parsed template tree.
*
* @return the kind
*/
Kind kind();
default TextNode asText() {
throw new IllegalStateException();
}
default SectionNode asSection() {
throw new IllegalStateException();
}
default ExpressionNode asExpression() {
throw new IllegalStateException();
}
default ParameterDeclarationNode asParamDeclaration() {
throw new IllegalStateException();
}
public | TemplateNode |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessorTests.java | {
"start": 136622,
"end": 137915
} | class ____ extends ResourceInjectionBean {
@Autowired(required = false)
protected ITestBean testBean3;
private IndexedTestBean indexedTestBean;
private List<NestedTestBean> nestedTestBeans;
public List<NestedTestBean> nestedTestBeansSetter;
@Autowired(required = false)
public List<NestedTestBean> nestedTestBeansField;
private ITestBean testBean4;
@Override
@Autowired(required = false)
public void setTestBean2(TestBean testBean2) {
super.setTestBean2(testBean2);
}
@Autowired(required = false)
private void inject(ITestBean testBean4, List<NestedTestBean> nestedTestBeans, IndexedTestBean indexedTestBean) {
this.testBean4 = testBean4;
this.indexedTestBean = indexedTestBean;
this.nestedTestBeans = nestedTestBeans;
}
@Autowired(required = false)
public void setNestedTestBeans(List<NestedTestBean> nestedTestBeans) {
this.nestedTestBeansSetter = nestedTestBeans;
}
public ITestBean getTestBean3() {
return this.testBean3;
}
public ITestBean getTestBean4() {
return this.testBean4;
}
public IndexedTestBean getIndexedTestBean() {
return this.indexedTestBean;
}
public List<NestedTestBean> getNestedTestBeans() {
return this.nestedTestBeans;
}
}
public static | OptionalCollectionResourceInjectionBean |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryLambdaTest.java | {
"start": 7376,
"end": 7958
} | class ____ {
private static String notUpperCased(String x) {
return "hello " + x;
}
void g() {
Function<String, String> l = Test::notUpperCased;
System.err.println(notUpperCased("world"));
}
}
""")
.doTest();
}
@Test
public void method_shapes() {
testHelper
.addInputLines(
"Test.java",
"""
import java.util.function.BiFunction;
import java.util.function.Supplier;
| Test |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/propertyeditors/URIEditorTests.java | {
"start": 934,
"end": 4505
} | class ____ {
@Test
void standardURI() {
doTestURI("mailto:juergen.hoeller@interface21.com");
}
@Test
void withNonExistentResource() {
doTestURI("gonna:/freak/in/the/morning/freak/in/the.evening");
}
@Test
void standardURL() {
doTestURI("https://www.springframework.org");
}
@Test
void standardURLWithFragment() {
doTestURI("https://www.springframework.org#1");
}
@Test
void standardURLWithWhitespace() {
PropertyEditor uriEditor = new URIEditor();
uriEditor.setAsText(" https://www.springframework.org ");
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uri.toString()).isEqualTo("https://www.springframework.org");
}
@Test
void classpathURL() {
PropertyEditor uriEditor = new URIEditor(getClass().getClassLoader());
uriEditor.setAsText("classpath:" + ClassUtils.classPackageAsResourcePath(getClass()) +
"/" + ClassUtils.getShortName(getClass()) + ".class");
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uriEditor.getAsText()).isEqualTo(uri.toString());
assertThat(uri.getScheme()).doesNotStartWith("classpath");
}
@Test
void classpathURLWithWhitespace() {
PropertyEditor uriEditor = new URIEditor(getClass().getClassLoader());
uriEditor.setAsText(" classpath:" + ClassUtils.classPackageAsResourcePath(getClass()) +
"/" + ClassUtils.getShortName(getClass()) + ".class ");
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uriEditor.getAsText()).isEqualTo(uri.toString());
assertThat(uri.getScheme()).doesNotStartWith("classpath");
}
@Test
void classpathURLAsIs() {
PropertyEditor uriEditor = new URIEditor();
uriEditor.setAsText("classpath:test.txt");
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uriEditor.getAsText()).isEqualTo(uri.toString());
assertThat(uri.getScheme()).startsWith("classpath");
}
@Test
void setAsTextWithNull() {
PropertyEditor uriEditor = new URIEditor();
uriEditor.setAsText(null);
assertThat(uriEditor.getValue()).isNull();
assertThat(uriEditor.getAsText()).isEmpty();
}
@Test
void getAsTextReturnsEmptyStringIfValueNotSet() {
PropertyEditor uriEditor = new URIEditor();
assertThat(uriEditor.getAsText()).isEmpty();
}
@Test
void encodeURI() {
PropertyEditor uriEditor = new URIEditor();
uriEditor.setAsText("https://example.com/spaces and \u20AC");
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uriEditor.getAsText()).isEqualTo(uri.toString());
assertThat(uri.toASCIIString()).isEqualTo("https://example.com/spaces%20and%20%E2%82%AC");
}
@Test
void encodeAlreadyEncodedURI() {
PropertyEditor uriEditor = new URIEditor(false);
uriEditor.setAsText("https://example.com/spaces%20and%20%E2%82%AC");
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uriEditor.getAsText()).isEqualTo(uri.toString());
assertThat(uri.toASCIIString()).isEqualTo("https://example.com/spaces%20and%20%E2%82%AC");
}
private void doTestURI(String uriSpec) {
PropertyEditor uriEditor = new URIEditor();
uriEditor.setAsText(uriSpec);
Object value = uriEditor.getValue();
assertThat(value).isInstanceOf(URI.class);
URI uri = (URI) value;
assertThat(uri.toString()).isEqualTo(uriSpec);
}
}
| URIEditorTests |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/codegen/calls/BuiltInMethodsTest.java | {
"start": 1234,
"end": 1757
} | class ____ {
private static Stream<Method> testMethodsAreAvailable() {
return Arrays.stream(BuiltInMethods.class.getMethods())
.filter(
m ->
Modifier.isStatic(m.getModifiers())
&& Modifier.isPublic(m.getModifiers()));
}
@ParameterizedTest
@MethodSource
void testMethodsAreAvailable(Method m) throws Exception {
assertThat(m.invoke(null)).isNotNull();
}
}
| BuiltInMethodsTest |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java | {
"start": 43828,
"end": 45285
} | class ____ implements Closeable {
private final Directory directory;
private RandomIndexWriter indexWriter;
private IndexReader indexReader;
private IndexSearcher indexSearcher;
public IndexReaderManager() {
this.directory = newDirectory();
}
private IndexReaderManager(Directory directory) {
this.directory = directory;
}
public IndexReader getIndexReader() throws IOException {
if (indexReader == null) {
indexWriter = new RandomIndexWriter(random(), directory);
initIndexWriter(indexWriter);
indexReader = indexWriter.getReader();
}
return indexReader;
}
public IndexSearcher getIndexSearcher() throws IOException {
if (indexSearcher == null) {
indexSearcher = newSearcher(getIndexReader());
}
return indexSearcher;
}
@Override
public void close() throws IOException {
if (indexReader != null) {
indexReader.close();
}
if (indexWriter != null) {
indexWriter.close();
}
if (directory != null) {
directory.close();
}
}
protected void initIndexWriter(RandomIndexWriter indexWriter) throws IOException {}
}
public static | IndexReaderManager |
java | quarkusio__quarkus | integration-tests/test-extension/tests/src/test/java/io/quarkus/it/extension/StartTest.java | {
"start": 209,
"end": 422
} | class ____ {
@Test
public void test1() {
assertTrue(Counter.startCounter.get() <= 1);
}
@Test
public void test2() {
assertTrue(Counter.startCounter.get() <= 1);
}
}
| StartTest |
java | micronaut-projects__micronaut-core | context/src/main/java/io/micronaut/scheduling/cron/CronExpression.java | {
"start": 5539,
"end": 12614
} | enum ____ {
SECOND(0, 59, null),
MINUTE(0, 59, null),
HOUR(0, 23, null),
DAY_OF_MONTH(1, 31, null),
MONTH(1, 12,
Arrays.asList("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC")),
DAY_OF_WEEK(1, 7,
Arrays.asList("MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"));
final int from, to;
final List<String> names;
/**
* Create a new cron field with given value.
*
* @param from The minimum value
* @param to The maximum value
* @param names The name assigned to each unit
*/
CronFieldType(int from, int to, List<String> names) {
this.from = from;
this.to = to;
this.names = names;
}
}
private static final int CRON_EXPRESSION_LENGTH_WITH_SEC = 6;
private static final int CRON_EXPRESSION_LENGTH_WITHOUT_SEC = 5;
private static final int FOUR = 4;
private final String expr;
private final SimpleField secondField;
private final SimpleField minuteField;
private final SimpleField hourField;
private final DayOfWeekField dayOfWeekField;
private final SimpleField monthField;
private final DayOfMonthField dayOfMonthField;
private CronExpression(final String expr) {
if (expr == null) {
throw new IllegalArgumentException("expr is null"); //$NON-NLS-1$
}
this.expr = expr;
final String[] parts = expr.split("\\s+"); //$NON-NLS-1$
if (parts.length < CRON_EXPRESSION_LENGTH_WITHOUT_SEC || parts.length > CRON_EXPRESSION_LENGTH_WITH_SEC) {
throw new IllegalArgumentException("Invalid cron expression [%s], expected 5 or 6 fields, got %s".formatted(expr, parts.length));
}
boolean withSeconds = parts.length == CRON_EXPRESSION_LENGTH_WITH_SEC;
int ix = withSeconds ? 1 : 0;
this.secondField = new SimpleField(CronFieldType.SECOND, withSeconds ? parts[0] : "0");
this.minuteField = new SimpleField(CronFieldType.MINUTE, parts[ix++]);
this.hourField = new SimpleField(CronFieldType.HOUR, parts[ix++]);
this.dayOfMonthField = new DayOfMonthField(parts[ix++]);
this.monthField = new SimpleField(CronFieldType.MONTH, parts[ix++]);
this.dayOfWeekField = new DayOfWeekField(parts[ix++]);
}
/**
* Create object from the String expression.
*
* @param expr The cron expression
* @return The {@link CronExpression} instance
*/
public static CronExpression create(final String expr) {
return new CronExpression(expr);
}
/**
* This will search for the next time within the next 4 years. If there is no
* time matching, an InvalidArgumentException will be thrown (it is very
* likely that the cron expression is invalid, like the February 30th).
*
* @param afterTime A date-time with a time-zone in the ISO-8601 calendar system
* @return The next time within next 4 years
*/
public ZonedDateTime nextTimeAfter(ZonedDateTime afterTime) {
return nextTimeAfter(afterTime, afterTime.plusYears(FOUR));
}
/**
* This will search for the next time within the next durationInMillis
* millisecond. Be aware that the duration is specified in millis,
* but in fact the limit is checked on a day-to-day basis.
*
* @param afterTime A date-time with a time-zone in the ISO-8601 calendar system
* @param durationInMillis The maximum duration in millis after a given time
* @return The next time within given duration
*/
public ZonedDateTime nextTimeAfter(ZonedDateTime afterTime, long durationInMillis) {
return nextTimeAfter(afterTime, afterTime.plus(Duration.ofMillis(durationInMillis)));
}
/**
* This will search for the next time within the given dateTimeBarrier.
*
* @param afterTime A date-time with a time-zone in the ISO-8601 calendar system
* @param dateTimeBarrier The upper limit or maximum date-time to check for next time
* @return The next time within given barrier
*/
public ZonedDateTime nextTimeAfter(ZonedDateTime afterTime, ZonedDateTime dateTimeBarrier) {
ZonedDateTime nextTime = ZonedDateTime.from(afterTime).withNano(0).plusSeconds(1).withNano(0);
while (true) { // day of week
while (true) { // month
while (true) { // day of month
while (true) { // hour
while (true) { // minute
while (true) { // second
if (secondField.matches(nextTime.getSecond())) {
break;
}
nextTime = nextTime.plusSeconds(1).withNano(0);
}
if (minuteField.matches(nextTime.getMinute())) {
break;
}
nextTime = nextTime.plusMinutes(1).withSecond(0).withNano(0);
}
if (hourField.matches(nextTime.getHour())) {
break;
}
nextTime = nextTime.plusHours(1).withMinute(0).withSecond(0).withNano(0);
}
if (dayOfMonthField.matches(nextTime.toLocalDate())) {
break;
}
nextTime = nextTime.plusDays(1).withHour(0).withMinute(0).withSecond(0).withNano(0);
checkIfDateTimeBarrierIsReached(nextTime, dateTimeBarrier);
}
if (monthField.matches(nextTime.getMonth().getValue())) {
break;
}
nextTime = nextTime.plusMonths(1).withDayOfMonth(1).withHour(0).withMinute(0).withSecond(0).withNano(0);
checkIfDateTimeBarrierIsReached(nextTime, dateTimeBarrier);
}
if (dayOfWeekField.matches(nextTime.toLocalDate())) {
break;
}
nextTime = nextTime.plusDays(1).withHour(0).withMinute(0).withSecond(0).withNano(0);
checkIfDateTimeBarrierIsReached(nextTime, dateTimeBarrier);
}
return nextTime;
}
private static void checkIfDateTimeBarrierIsReached(ZonedDateTime nextTime, ZonedDateTime dateTimeBarrier) {
if (nextTime.isAfter(dateTimeBarrier)) {
throw new IllegalArgumentException("No next execution time could be determined that is before the limit of " + dateTimeBarrier);
}
}
/**
* @since 3.1.0
* Returns String expression.
*
* @return The underlying cron expression as string.
*/
public String getExpression() {
return expr;
}
@Override
public String toString() {
return getClass().getSimpleName() + "<" + expr + ">";
}
/**
* A | CronFieldType |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableGenerator.java | {
"start": 3548,
"end": 5242
} | class ____ {
public int active;
public int relocating;
public int initializing;
public int unassigned;
public int unassignedPrimary;
public int primaryActive;
public int primaryInactive;
private boolean inactivePrimaryCausesRed = false;
public ClusterHealthStatus status() {
if (primaryInactive > 0) {
if (inactivePrimaryCausesRed) {
return ClusterHealthStatus.RED;
} else {
return ClusterHealthStatus.YELLOW;
}
}
if (unassigned > 0 || initializing > 0) {
return ClusterHealthStatus.YELLOW;
}
return ClusterHealthStatus.GREEN;
}
public void update(ShardRouting shardRouting) {
if (shardRouting.active()) {
active++;
if (shardRouting.primary()) {
primaryActive++;
}
if (shardRouting.relocating()) {
relocating++;
}
return;
}
if (shardRouting.primary()) {
primaryInactive++;
if (inactivePrimaryCausesRed == false) {
inactivePrimaryCausesRed = getInactivePrimaryHealth(shardRouting) == ClusterHealthStatus.RED;
}
}
if (shardRouting.initializing()) {
initializing++;
} else {
if (shardRouting.primary()) {
unassignedPrimary++;
}
unassigned++;
}
}
}
}
| ShardCounter |
java | grpc__grpc-java | api/src/main/java/io/grpc/ManagedChannelRegistry.java | {
"start": 7933,
"end": 8400
} | class ____
implements ServiceProviders.PriorityAccessor<ManagedChannelProvider> {
@Override
public boolean isAvailable(ManagedChannelProvider provider) {
return provider.isAvailable();
}
@Override
public int getPriority(ManagedChannelProvider provider) {
return provider.priority();
}
}
/** Thrown when no suitable {@link ManagedChannelProvider} objects can be found. */
public static final | ManagedChannelPriorityAccessor |
java | apache__flink | flink-queryable-state/flink-queryable-state-client-java/src/test/java/org/apache/flink/queryablestate/client/VoidNamespaceTypeInfoTest.java | {
"start": 974,
"end": 1217
} | class ____ extends TypeInformationTestBase<VoidNamespaceTypeInfo> {
@Override
protected VoidNamespaceTypeInfo[] getTestData() {
return new VoidNamespaceTypeInfo[] {VoidNamespaceTypeInfo.INSTANCE};
}
}
| VoidNamespaceTypeInfoTest |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/output/ReplayOutput.java | {
"start": 2215,
"end": 2720
} | class ____ extends Signal {
final ByteBuffer message;
BulkStringSupport(ByteBuffer message) {
if (message != null) {
// need to copy the buffer to prevent buffer lifecycle mismatch
this.message = ByteBuffer.allocate(message.remaining());
this.message.put(message);
this.message.rewind();
} else {
this.message = null;
}
}
}
public static | BulkStringSupport |
java | spring-projects__spring-boot | module/spring-boot-webflux/src/main/java/org/springframework/boot/webflux/actuate/endpoint/web/WebFluxEndpointHandlerMapping.java | {
"start": 4296,
"end": 4871
} | class ____ implements RuntimeHintsRegistrar {
private final ReflectiveRuntimeHintsRegistrar reflectiveRegistrar = new ReflectiveRuntimeHintsRegistrar();
private final BindingReflectionHintsRegistrar bindingRegistrar = new BindingReflectionHintsRegistrar();
@Override
public void registerHints(RuntimeHints hints, @Nullable ClassLoader classLoader) {
this.reflectiveRegistrar.registerRuntimeHints(hints, WebFluxLinksHandler.class);
this.bindingRegistrar.registerReflectionHints(hints.reflection(), Link.class);
}
}
}
| WebFluxEndpointHandlerMappingRuntimeHints |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java | {
"start": 1956,
"end": 17195
} | class ____ {
public static final Logger LOG = LoggerFactory.getLogger(TestParam.class);
final Configuration conf = new Configuration();
@Test
public void testAccessTimeParam() {
final AccessTimeParam p = new AccessTimeParam(AccessTimeParam.DEFAULT);
assertEquals(-1L, p.getValue().longValue());
new AccessTimeParam(-1L);
try {
new AccessTimeParam(-2L);
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testBlockSizeParam() {
final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
assertEquals(null, p.getValue());
assertEquals(
conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
p.getValue(conf));
new BlockSizeParam(1L);
try {
new BlockSizeParam(0L);
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testBufferSizeParam() {
final BufferSizeParam p = new BufferSizeParam(BufferSizeParam.DEFAULT);
assertEquals(null, p.getValue());
assertEquals(
conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
p.getValue(conf));
new BufferSizeParam(1);
try {
new BufferSizeParam(0);
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testDelegationParam() {
final DelegationParam p = new DelegationParam(DelegationParam.DEFAULT);
assertEquals(null, p.getValue());
}
@Test
public void testDestinationParam() {
final DestinationParam p = new DestinationParam(DestinationParam.DEFAULT);
assertEquals(null, p.getValue());
new DestinationParam("/abc");
try {
new DestinationParam("abc");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testGroupParam() {
final GroupParam p = new GroupParam(GroupParam.DEFAULT);
assertEquals(null, p.getValue());
}
@Test
public void testModificationTimeParam() {
final ModificationTimeParam p = new ModificationTimeParam(ModificationTimeParam.DEFAULT);
assertEquals(-1L, p.getValue().longValue());
new ModificationTimeParam(-1L);
try {
new ModificationTimeParam(-2L);
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testOverwriteParam() {
final OverwriteParam p = new OverwriteParam(OverwriteParam.DEFAULT);
assertEquals(false, p.getValue());
new OverwriteParam("trUe");
try {
new OverwriteParam("abc");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testOwnerParam() {
final OwnerParam p = new OwnerParam(OwnerParam.DEFAULT);
assertEquals(null, p.getValue());
}
@Test
public void testPermissionParam() {
final PermissionParam p = new PermissionParam(PermissionParam.DEFAULT);
assertEquals(new FsPermission((short)0755), p.getDirFsPermission());
assertEquals(new FsPermission((short)0644), p.getFileFsPermission());
new PermissionParam("0");
try {
new PermissionParam("-1");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
new PermissionParam("1777");
try {
new PermissionParam("2000");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new PermissionParam("8");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new PermissionParam("abc");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testRecursiveParam() {
final RecursiveParam p = new RecursiveParam(RecursiveParam.DEFAULT);
assertEquals(false, p.getValue());
new RecursiveParam("falSe");
try {
new RecursiveParam("abc");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testRenewerParam() {
final RenewerParam p = new RenewerParam(RenewerParam.DEFAULT);
assertEquals(null, p.getValue());
}
@Test
public void testReplicationParam() {
final ReplicationParam p = new ReplicationParam(ReplicationParam.DEFAULT);
assertEquals(null, p.getValue());
assertEquals(
(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT),
p.getValue(conf));
new ReplicationParam((short)1);
try {
new ReplicationParam((short)0);
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testToSortedStringEscapesURICharacters() {
final String sep = "&";
Param<?, ?> ampParam = new TokenArgumentParam("token&ersand");
Param<?, ?> equalParam = new RenewerParam("renewer=equal");
final String expected = "&renewer=renewer%3Dequal&token=token%26ampersand";
final String actual = Param.toSortedString(sep, equalParam, ampParam);
assertEquals(expected, actual);
}
@Test
public void userNameEmpty() {
UserParam userParam = new UserParam("");
assertNull(userParam.getValue());
}
@Test
public void userNameInvalidStart() {
assertThrows(IllegalArgumentException.class, () -> {
new UserParam("1x");
});
}
@Test
public void userNameInvalidDollarSign() {
assertThrows(IllegalArgumentException.class, () -> {
new UserParam("1$x");
});
}
@Test
public void userNameMinLength() {
UserParam userParam = new UserParam("a");
assertNotNull(userParam.getValue());
}
@Test
public void userNameValidDollarSign() {
UserParam userParam = new UserParam("a$");
assertNotNull(userParam.getValue());
}
@Test
public void testConcatSourcesParam() {
final String[] strings = {"/", "/foo", "/bar"};
for(int n = 0; n < strings.length; n++) {
final String[] sub = new String[n];
final Path[] paths = new Path[n];
for(int i = 0; i < paths.length; i++) {
paths[i] = new Path(sub[i] = strings[i]);
}
final String expected = StringUtils.join(",", Arrays.asList(sub));
final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
assertEquals(expected, computed.getValue());
}
}
@Test
public void testUserNameOkAfterResettingPattern() {
UserParam.Domain oldDomain = UserParam.getUserPatternDomain();
String newPattern = "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$";
UserParam.setUserPattern(newPattern);
UserParam userParam = new UserParam("1x");
assertNotNull(userParam.getValue());
userParam = new UserParam("123");
assertNotNull(userParam.getValue());
UserParam.setUserPatternDomain(oldDomain);
}
@Test
public void testAclPermissionParam() {
final AclPermissionParam p =
new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
List<AclEntry> setAclList =
AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
true);
assertEquals(setAclList.toString(), p.getAclPermission(true)
.toString());
new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
try {
new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
new AclPermissionParam(
"user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
try {
new AclPermissionParam("user:r-,group:rwx,other:rw-");
fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testUserGroupOkAfterAlteringAclPattern() {
// Preserve default pattern value
AclPermissionParam.Domain oldDomain =
AclPermissionParam.getAclPermissionPattern();
// Override the pattern with one that accepts '@' and numbers
// in the first character of usernames/groupnames
String newPattern =
"^(default:)?(user|group|mask|other):" +
"[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?" +
"(,(default:)?(user|group|mask|other):" +
"[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
try {
AclPermissionParam.setAclPermissionPattern(newPattern);
String numericUserSpec = "user:110201:rwx";
AclPermissionParam aclNumericUserParam =
new AclPermissionParam(numericUserSpec);
assertEquals(numericUserSpec, aclNumericUserParam.getValue());
String oddGroupSpec = "group:foo@bar:rwx";
AclPermissionParam aclGroupWithDomainParam =
new AclPermissionParam(oddGroupSpec);
assertEquals(oddGroupSpec, aclGroupWithDomainParam.getValue());
} finally {
// Revert back to the default rules for remainder of tests
AclPermissionParam.setAclPermissionPattern(oldDomain);
}
}
@Test
public void testXAttrNameParam() {
final XAttrNameParam p = new XAttrNameParam("user.a1");
assertEquals(p.getXAttrName(), "user.a1");
}
@Test
public void testXAttrValueParam() throws IOException {
final XAttrValueParam p = new XAttrValueParam("0x313233");
assertArrayEquals(p.getXAttrValue(),
XAttrCodec.decodeValue("0x313233"));
}
@Test
public void testXAttrEncodingParam() {
final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
assertEquals(p.getEncoding(), XAttrCodec.BASE64);
final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
}
@Test
public void testXAttrSetFlagParam() {
EnumSet<XAttrSetFlag> flag = EnumSet.of(
XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
assertEquals(p.getFlag(), flag);
final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
assertEquals(p1.getFlag(), flag);
}
@Test
public void testRenameOptionSetParam() {
final RenameOptionSetParam p = new RenameOptionSetParam(
Options.Rename.OVERWRITE, Options.Rename.NONE);
final RenameOptionSetParam p1 = new RenameOptionSetParam(
p.getValueString());
assertEquals(p1.getValue(), EnumSet.of(
Options.Rename.OVERWRITE, Options.Rename.NONE));
}
@Test
public void testSnapshotNameParam() {
final OldSnapshotNameParam s1 = new OldSnapshotNameParam("s1");
final SnapshotNameParam s2 = new SnapshotNameParam("s2");
assertEquals("s1", s1.getValue());
assertEquals("s2", s2.getValue());
}
@Test
public void testFsActionParam() {
new FsActionParam("rwx");
new FsActionParam("rw-");
new FsActionParam("r-x");
new FsActionParam("-wx");
new FsActionParam("r--");
new FsActionParam("-w-");
new FsActionParam("--x");
new FsActionParam("---");
try {
new FsActionParam("rw");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("qwx");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("qrwx");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("rwxx");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("xwr");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("r-w");
fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testStartAfterParam() throws Exception {
String s = "/helloWorld";
StartAfterParam param = new StartAfterParam(s);
assertEquals(s, param.getValue());
}
@Test
public void testStoragePolicyParam() {
StoragePolicyParam p = new StoragePolicyParam(StoragePolicyParam.DEFAULT);
assertEquals(null, p.getValue());
p = new StoragePolicyParam("COLD");
assertEquals("COLD", p.getValue());
}
@Test
public void testNamespaceQuotaParam() {
NameSpaceQuotaParam p =
new NameSpaceQuotaParam(NameSpaceQuotaParam.DEFAULT);
assertEquals(Long.valueOf(NameSpaceQuotaParam.DEFAULT), p.getValue());
p = new NameSpaceQuotaParam(100L);
assertEquals(100L, p.getValue().longValue());
}
@Test
public void testStorageSpaceQuotaParam() {
StorageSpaceQuotaParam sp = new StorageSpaceQuotaParam(
StorageSpaceQuotaParam.DEFAULT);
assertEquals(Long.valueOf(StorageSpaceQuotaParam.DEFAULT),
sp.getValue());
sp = new StorageSpaceQuotaParam(100L);
assertEquals(100L, sp.getValue().longValue());
}
@Test
public void testStorageTypeParam() {
StorageTypeParam p = new StorageTypeParam(StorageTypeParam.DEFAULT);
assertNull(p.getValue());
p = new StorageTypeParam(StorageType.DISK.name());
assertEquals(StorageType.DISK.name(), p.getValue());
}
@Test
public void testECPolicyParam() {
ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);
assertEquals(null, p.getValue());
p = new ECPolicyParam("RS-6-3-1024k");
assertEquals("RS-6-3-1024k", p.getValue());
}
@Test
public void testHttpOpParams() {
try {
new PostOpParam("TEST");
fail("Construct the PostOpParam with param value 'TEST' should be"
+ " failed.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"TEST is not a valid POST operation.", e);
}
try {
new PutOpParam("TEST");
fail("Construct the PutOpParam with param value 'TEST' should be"
+ " failed.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"TEST is not a valid PUT operation.", e);
}
try {
new DeleteOpParam("TEST");
fail("Construct the DeleteOpParam with param value 'TEST' should be"
+ " failed.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"TEST is not a valid DELETE operation.", e);
}
try {
new GetOpParam("TEST");
fail("Construct the GetOpParam with param value 'TEST' should be"
+ " failed.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"TEST is not a valid GET operation.", e);
}
}
}
| TestParam |
java | quarkusio__quarkus | extensions/infinispan-client/deployment/src/main/java/io/quarkus/infinispan/client/deployment/InfinispanClientProcessor.java | {
"start": 5133,
"end": 19205
} | class ____ {
private static final Log log = LogFactory.getLog(InfinispanClientProcessor.class);
private static final String SERVICE_BINDING_INTERFACE_NAME = "io.quarkus.kubernetes.service.binding.runtime.ServiceBindingConverter";
private static final DotName INFINISPAN_CLIENT_ANNOTATION = DotName.createSimple(InfinispanClientName.class.getName());
private static final DotName INFINISPAN_REMOTE_ANNOTATION = DotName.createSimple(Remote.class.getName());
private static final DotName INFINISPAN_CLIENT = DotName.createSimple(RemoteCacheManager.class.getName());
private static final DotName INFINISPAN_COUNTER_MANAGER = DotName.createSimple(CounterManager.class.getName());
private static final DotName INFINISPAN_CACHE_CLIENT = DotName.createSimple(RemoteCache.class.getName());
private static final String META_INF = "META-INF";
private static final String DEFAULT_HOTROD_CLIENT_PROPERTIES = "hotrod-client.properties";
private static final String PROTO_EXTENSION = ".proto";
private static final String SASL_SECURITY_PROVIDER = "com.sun.security.sasl.Provider";
private static final List<DotName> SUPPORTED_INJECTION_TYPE = List.of(
// Client types
INFINISPAN_CLIENT,
INFINISPAN_COUNTER_MANAGER,
INFINISPAN_CACHE_CLIENT);
/**
* The Infinispan client build time configuration.
*/
InfinispanClientsBuildTimeConfig infinispanClientsBuildTimeConfig;
@BuildStep(onlyIf = NativeOrNativeSourcesBuild.class)
NativeImageFeatureBuildItem nativeImageFeature() {
return new NativeImageFeatureBuildItem(DisableLoggingFeature.class);
}
@BuildStep
FeatureBuildItem feature() {
return new FeatureBuildItem(Feature.INFINISPAN_CLIENT);
}
/**
* Sets up additional properties for use when proto stream marshaller is in use
*/
@BuildStep
public void handleProtoStreamRequirements(BuildProducer<MarshallingBuildItem> protostreamPropertiesBuildItem)
throws ClassNotFoundException {
Properties properties = new Properties();
Map<String, Object> marshallers = new HashMap<>();
initMarshaller(InfinispanClientUtil.DEFAULT_INFINISPAN_CLIENT_NAME,
infinispanClientsBuildTimeConfig.defaultInfinispanClient().marshallerClass(), marshallers);
for (String clientName : infinispanClientsBuildTimeConfig.getInfinispanNamedClientConfigNames()) {
initMarshaller(clientName,
infinispanClientsBuildTimeConfig.getInfinispanClientBuildTimeConfig(clientName).marshallerClass(),
marshallers);
}
protostreamPropertiesBuildItem.produce(new MarshallingBuildItem(properties, marshallers));
}
private static void initMarshaller(String clientName, Optional<String> marshallerOpt, Map<String, Object> marshallers)
throws ClassNotFoundException {
if (marshallerOpt.isPresent()) {
Class<?> marshallerClass = Class.forName(
marshallerOpt.get(), false,
Thread.currentThread().getContextClassLoader());
marshallers.put(clientName, Util.getInstance(marshallerClass));
} else {
// Default to proto stream marshaller if one is not provided
marshallers.put(clientName, new ProtoStreamMarshaller());
}
}
@BuildStep
InfinispanPropertiesBuildItem setup(ApplicationArchivesBuildItem applicationArchivesBuildItem,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<HotDeploymentWatchedFileBuildItem> hotDeployment,
BuildProducer<AdditionalBeanBuildItem> additionalBeans,
BuildProducer<ExtensionSslNativeSupportBuildItem> sslNativeSupport,
BuildProducer<NativeImageSecurityProviderBuildItem> nativeImageSecurityProviders,
BuildProducer<InfinispanClientNameBuildItem> infinispanClientNames,
MarshallingBuildItem marshallingBuildItem,
BuildProducer<NativeImageResourceBuildItem> resourceBuildItem,
CombinedIndexBuildItem applicationIndexBuildItem) throws ClassNotFoundException, IOException {
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(InfinispanClientProducer.class));
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(CacheInvalidateAllInterceptor.class));
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(CacheResultInterceptor.class));
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(CacheInvalidateInterceptor.class));
additionalBeans.produce(AdditionalBeanBuildItem.unremovableOf(SynchronousInfinispanGet.class));
additionalBeans.produce(AdditionalBeanBuildItem.builder().addBeanClass(InfinispanClientName.class).build());
additionalBeans.produce(AdditionalBeanBuildItem.builder().addBeanClass(Remote.class).build());
resourceBuildItem.produce(new NativeImageResourceBuildItem("org/infinispan/commons/query/client/query.proto"));
resourceBuildItem.produce(new NativeImageResourceBuildItem(WrappedMessage.PROTO_FILE));
hotDeployment
.produce(new HotDeploymentWatchedFileBuildItem(META_INF + File.separator + DEFAULT_HOTROD_CLIENT_PROPERTIES));
// Enable SSL support by default
sslNativeSupport.produce(new ExtensionSslNativeSupportBuildItem(Feature.INFINISPAN_CLIENT));
nativeImageSecurityProviders.produce(new NativeImageSecurityProviderBuildItem(SASL_SECURITY_PROVIDER));
// add per cache file config
handlePerCacheFileConfig(infinispanClientsBuildTimeConfig.defaultInfinispanClient(), resourceBuildItem, hotDeployment);
for (InfinispanClientBuildTimeConfig config : infinispanClientsBuildTimeConfig.namedInfinispanClients().values()) {
handlePerCacheFileConfig(config, resourceBuildItem, hotDeployment);
}
Map<String, Properties> propertiesMap = new HashMap<>();
IndexView index = applicationIndexBuildItem.getIndex();
// named and default
Set<String> allClientNames = infinispanClientNames(applicationIndexBuildItem, infinispanClientNames);
allClientNames.addAll(infinispanClientsBuildTimeConfig.getInfinispanNamedClientConfigNames());
allClientNames.add(DEFAULT_INFINISPAN_CLIENT_NAME);
for (String clientName : allClientNames) {
Properties properties = loadHotrodProperties(clientName, reflectiveClass, marshallingBuildItem);
propertiesMap.put(clientName, properties);
// This is always non-null
Object marshaller = properties.get(ConfigurationProperties.MARSHALLER);
if (marshaller instanceof ProtoStreamMarshaller) {
for (ApplicationArchive applicationArchive : applicationArchivesBuildItem.getAllApplicationArchives()) {
// If we have properties file we may have to care about
Path metaPath = applicationArchive.getChildPath(META_INF);
if (metaPath != null) {
try (Stream<Path> dirElements = Files.list(metaPath)) {
Iterator<Path> protoFiles = dirElements
.filter(Files::isRegularFile)
.filter(p -> p.toString().endsWith(PROTO_EXTENSION))
.iterator();
// We monitor the entire meta inf directory if properties are available
if (protoFiles.hasNext()) {
// Quarkus doesn't currently support hot deployment watching directories
// hotDeployment.produce(new HotDeploymentConfigFileBuildItem(META_INF));
}
while (protoFiles.hasNext()) {
Path path = protoFiles.next();
if (log.isDebugEnabled()) {
log.debug(" " + path.toAbsolutePath());
}
byte[] bytes = Files.readAllBytes(path);
// This uses the default file encoding - should we enforce UTF-8?
properties.put(PROTOBUF_FILE_PREFIX + path.getFileName().toString(),
new String(bytes, StandardCharsets.UTF_8));
}
}
}
}
properties.putAll(marshallingBuildItem.getProperties());
Collection<ClassInfo> schemaClasses = index.getAllKnownImplementations(DotName.createSimple(
Schema.class.getName()));
schemaClasses
.addAll(index.getAllKnownImplementations(DotName.createSimple(GeneratedSchema.class.getName())));
Set<Schema> schemas = new HashSet<>(schemaClasses.size());
for (ClassInfo ci : schemaClasses) {
Class<?> initializerClass = Thread.currentThread().getContextClassLoader().loadClass(ci.toString());
try {
Schema sci = (Schema) initializerClass
.getDeclaredConstructor().newInstance();
schemas.add(sci);
} catch (InstantiationException | IllegalAccessException | InvocationTargetException
| NoSuchMethodException e) {
// This shouldn't ever be possible as annotation processor should generate empty constructor
throw new RuntimeException(e);
}
}
if (!schemas.isEmpty()) {
properties.put(InfinispanClientProducer.PROTOBUF_SCHEMAS, schemas);
}
}
}
// Add any user project listeners to allow reflection in native code
Collection<AnnotationInstance> listenerInstances = index.getAnnotations(
DotName.createSimple(ClientListener.class.getName()));
for (AnnotationInstance instance : listenerInstances) {
AnnotationTarget target = instance.target();
if (target.kind() == AnnotationTarget.Kind.CLASS) {
reflectiveClass.produce(ReflectiveClassBuildItem.builder(
target.asClass().name().toString())
.methods().build());
}
}
// This is required for netty to work properly
reflectiveClass.produce(ReflectiveClassBuildItem.builder(
"io.netty.channel.socket.nio.NioSocketChannel").build());
// We use reflection to have continuous queries work
reflectiveClass.produce(ReflectiveClassBuildItem.builder(
"org.infinispan.client.hotrod.event.impl.ContinuousQueryImpl$ClientEntryListener")
.methods().build());
// We use reflection to allow for near cache invalidations
reflectiveClass.produce(ReflectiveClassBuildItem.builder(
"org.infinispan.client.hotrod.near.NearCacheService$InvalidatedNearCacheListener")
.methods().build());
// This is required when a cache is clustered to tell us topology
reflectiveClass.produce(
ReflectiveClassBuildItem.builder(
"org.infinispan.client.hotrod.impl.consistenthash.SegmentConsistentHash")
.build());
// Elytron Classes
String[] elytronClasses = new String[] {
"org.wildfly.security.sasl.plain.PlainSaslClientFactory",
"org.wildfly.security.sasl.scram.ScramSaslClientFactory",
"org.wildfly.security.sasl.digest.DigestClientFactory",
"org.wildfly.security.credential.BearerTokenCredential",
"org.wildfly.security.credential.GSSKerberosCredential",
"org.wildfly.security.credential.KeyPairCredential",
"org.wildfly.security.credential.PasswordCredential",
"org.wildfly.security.credential.PublicKeyCredential",
"org.wildfly.security.credential.SecretKeyCredential",
"org.wildfly.security.credential.SSHCredential",
"org.wildfly.security.digest.SHA512_256MessageDigest",
"org.wildfly.security.credential.X509CertificateChainPrivateCredential"
};
reflectiveClass.produce(ReflectiveClassBuildItem.builder(elytronClasses).reason(getClass().getName()).build());
return new InfinispanPropertiesBuildItem(propertiesMap);
}
private void handlePerCacheFileConfig(InfinispanClientBuildTimeConfig config,
BuildProducer<NativeImageResourceBuildItem> resourceBuildItem,
BuildProducer<HotDeploymentWatchedFileBuildItem> hotDeployment) {
for (InfinispanClientBuildTimeConfig.RemoteCacheConfig cacheConfig : config.cache().values()) {
if (cacheConfig.configurationResource().isPresent()) {
resourceBuildItem.produce(new NativeImageResourceBuildItem(cacheConfig.configurationResource().get()));
hotDeployment.produce(new HotDeploymentWatchedFileBuildItem(cacheConfig.configurationResource().get()));
}
}
}
@BuildStep
@Record(ExecutionTime.STATIC_INIT)
BeanContainerListenerBuildItem build(InfinispanRecorder recorder, InfinispanPropertiesBuildItem builderBuildItem) {
Map<String, Properties> propertiesMap = builderBuildItem.getProperties();
// This is necessary to be done for Protostream Marshaller init in native
return new BeanContainerListenerBuildItem(recorder.configureInfinispan(propertiesMap));
}
/**
* Reads all the contents of the file as a single string using default charset
*
* @param fileName file on | InfinispanClientProcessor |
java | google__guava | android/guava/src/com/google/common/collect/ForwardingSortedMap.java | {
"start": 3892,
"end": 5788
} | class ____ extends Maps.SortedKeySet<K, V> {
/** Constructor for use by subclasses. */
public StandardKeySet() {
super(ForwardingSortedMap.this);
}
}
// unsafe, but worst case is a CCE or NPE is thrown, which callers will be expecting
@SuppressWarnings({"unchecked", "nullness"})
static int unsafeCompare(
@Nullable Comparator<?> comparator, @Nullable Object o1, @Nullable Object o2) {
if (comparator == null) {
return ((Comparable<@Nullable Object>) o1).compareTo(o2);
} else {
return ((Comparator<@Nullable Object>) comparator).compare(o1, o2);
}
}
/**
* A sensible definition of {@link #containsKey} in terms of the {@code firstKey()} method of
* {@link #tailMap}. If you override {@link #tailMap}, you may wish to override {@link
* #containsKey} to forward to this implementation.
*
* @since 7.0
*/
@Override
protected boolean standardContainsKey(@Nullable Object key) {
try {
// any CCE or NPE will be caught
@SuppressWarnings({"unchecked", "nullness"})
SortedMap<@Nullable Object, V> self = (SortedMap<@Nullable Object, V>) this;
Object ceilingKey = self.tailMap(key).firstKey();
return unsafeCompare(comparator(), ceilingKey, key) == 0;
} catch (ClassCastException | NoSuchElementException | NullPointerException e) {
return false;
}
}
/**
* A sensible default implementation of {@link #subMap(Object, Object)} in terms of {@link
* #headMap(Object)} and {@link #tailMap(Object)}. In some situations, you may wish to override
* {@link #subMap(Object, Object)} to forward to this implementation.
*
* @since 7.0
*/
protected SortedMap<K, V> standardSubMap(K fromKey, K toKey) {
checkArgument(unsafeCompare(comparator(), fromKey, toKey) <= 0, "fromKey must be <= toKey");
return tailMap(fromKey).headMap(toKey);
}
}
| StandardKeySet |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/NonFinalCompileTimeConstantTest.java | {
"start": 2700,
"end": 3069
} | class ____ {
public void f(final @CompileTimeConstant Object x) {}
}
""")
.doTest();
}
@Test
public void negativeEffectivelyFinal() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.CompileTimeConstant;
public | Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/MaterializedInterfaceTest.java | {
"start": 578,
"end": 720
} | interface ____ {
int getId();
void setId(int value);
String getName();
void setName(String value);
}
}
| Bean |
java | apache__camel | test-infra/camel-test-infra-xmpp/src/main/java/org/apache/camel/test/infra/xmpp/common/XmppProperties.java | {
"start": 867,
"end": 1216
} | class ____ {
public static final String XMPP_CONTAINER = "xmpp.container";
public static final String XMPP_URL = "xmpp.url";
public static final String XMPP_HOST = "xmpp.host";
public static final String XMPP_PORT = "xmpp.port";
public static final Integer PORT_DEFAULT = 5222;
private XmppProperties() {
}
}
| XmppProperties |
java | spring-projects__spring-framework | spring-context-indexer/src/main/java/org/springframework/context/index/processor/IndexedStereotypesProvider.java | {
"start": 1167,
"end": 3877
} | class ____ implements StereotypesProvider {
private static final String INDEXED_ANNOTATION = "org.springframework.stereotype.Indexed";
private final TypeHelper typeHelper;
public IndexedStereotypesProvider(TypeHelper typeHelper) {
this.typeHelper = typeHelper;
}
@Override
public Set<String> getStereotypes(Element element) {
Set<String> stereotypes = new LinkedHashSet<>();
ElementKind kind = element.getKind();
if (!kind.isClass() && kind != ElementKind.INTERFACE) {
return stereotypes;
}
Set<Element> seen = new HashSet<>();
collectStereotypesOnAnnotations(seen, stereotypes, element);
seen = new HashSet<>();
collectStereotypesOnTypes(seen, stereotypes, element);
return stereotypes;
}
private void collectStereotypesOnAnnotations(Set<Element> seen, Set<String> stereotypes, Element element) {
for (AnnotationMirror annotation : this.typeHelper.getAllAnnotationMirrors(element)) {
Element next = collectStereotypes(seen, stereotypes, element, annotation);
if (next != null) {
collectStereotypesOnAnnotations(seen, stereotypes, next);
}
}
}
private void collectStereotypesOnTypes(Set<Element> seen, Set<String> stereotypes, Element type) {
if (!seen.contains(type)) {
seen.add(type);
if (isAnnotatedWithIndexed(type)) {
stereotypes.add(this.typeHelper.getType(type));
}
Element superClass = this.typeHelper.getSuperClass(type);
if (superClass != null) {
collectStereotypesOnTypes(seen, stereotypes, superClass);
}
this.typeHelper.getDirectInterfaces(type).forEach(
i -> collectStereotypesOnTypes(seen, stereotypes, i));
}
}
private Element collectStereotypes(Set<Element> seen, Set<String> stereotypes, Element element,
AnnotationMirror annotation) {
if (isIndexedAnnotation(annotation)) {
stereotypes.add(this.typeHelper.getType(element));
}
return getCandidateAnnotationElement(seen, annotation);
}
private Element getCandidateAnnotationElement(Set<Element> seen, AnnotationMirror annotation) {
Element element = annotation.getAnnotationType().asElement();
if (seen.contains(element)) {
return null;
}
// We need to visit all indexed annotations.
if (!isIndexedAnnotation(annotation)) {
seen.add(element);
}
return (!element.toString().startsWith("java.lang") ? element : null);
}
private boolean isAnnotatedWithIndexed(Element type) {
for (AnnotationMirror annotation : type.getAnnotationMirrors()) {
if (isIndexedAnnotation(annotation)) {
return true;
}
}
return false;
}
private boolean isIndexedAnnotation(AnnotationMirror annotation) {
return INDEXED_ANNOTATION.equals(annotation.getAnnotationType().toString());
}
}
| IndexedStereotypesProvider |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/disk/iomanager/IORequest.java | {
"start": 1771,
"end": 2046
} | interface ____ extends IORequest {
/**
* Called by the target I/O thread to perform the actual writing operation.
*
* @throws IOException My be thrown by the method to indicate an I/O problem.
*/
public void write() throws IOException;
}
| WriteRequest |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/spi/DefaultAccessorNamingStrategy.java | {
"start": 839,
"end": 10455
} | class ____ implements AccessorNamingStrategy {
private static final Pattern JAVA_JAVAX_PACKAGE = Pattern.compile( "^javax?\\..*" );
protected Elements elementUtils;
protected Types typeUtils;
@Override
public void init(MapStructProcessingEnvironment processingEnvironment) {
this.elementUtils = processingEnvironment.getElementUtils();
this.typeUtils = processingEnvironment.getTypeUtils();
}
@Override
public MethodType getMethodType(ExecutableElement method) {
if ( isGetterMethod( method ) ) {
return MethodType.GETTER;
}
else if ( isSetterMethod( method ) ) {
return MethodType.SETTER;
}
else if ( isAdderMethod( method ) ) {
return MethodType.ADDER;
}
else if ( isPresenceCheckMethod( method ) ) {
return MethodType.PRESENCE_CHECKER;
}
else {
return MethodType.OTHER;
}
}
/**
* Returns {@code true} when the {@link ExecutableElement} is a getter method. A method is a getter when it
* has no parameters, starts
* with 'get' and the return type is any type other than {@code void}, OR the getter starts with 'is' and the type
* returned is a primitive or the wrapper for {@code boolean}. NOTE: the latter does strictly not comply to the bean
* convention. The remainder of the name is supposed to reflect the property name.
* <p>
* The calling MapStruct code guarantees that the given method has no arguments.
*
* @param method to be analyzed
*
* @return {@code true} when the method is a getter.
*/
public boolean isGetterMethod(ExecutableElement method) {
if ( !method.getParameters().isEmpty() ) {
// If the method has parameters it can't be a getter
return false;
}
String methodName = method.getSimpleName().toString();
boolean isNonBooleanGetterName = methodName.startsWith( "get" ) && methodName.length() > 3 &&
method.getReturnType().getKind() != TypeKind.VOID;
boolean isBooleanGetterName = methodName.startsWith( "is" ) && methodName.length() > 2;
boolean returnTypeIsBoolean = method.getReturnType().getKind() == TypeKind.BOOLEAN ||
"java.lang.Boolean".equals( getQualifiedName( method.getReturnType() ) );
return isNonBooleanGetterName || ( isBooleanGetterName && returnTypeIsBoolean );
}
/**
* Returns {@code true} when the {@link ExecutableElement} is a setter method. A setter starts with 'set'. The
* remainder of the name is supposed to reflect the property name.
* <p>
* The calling MapStruct code guarantees that there's only one argument.
*
* @param method to be analyzed
* @return {@code true} when the method is a setter.
*/
public boolean isSetterMethod(ExecutableElement method) {
String methodName = method.getSimpleName().toString();
return methodName.startsWith( "set" ) && methodName.length() > 3 || isFluentSetter( method );
}
protected boolean isFluentSetter(ExecutableElement method) {
return method.getParameters().size() == 1 &&
!JAVA_JAVAX_PACKAGE.matcher( method.getEnclosingElement().asType().toString() ).matches() &&
!isAdderWithUpperCase4thCharacter( method ) &&
typeUtils.isAssignable( method.getReturnType(), method.getEnclosingElement().asType() );
}
/**
* Checks that the method is an adder with an upper case 4th character. The reason for this is that methods such
* as {@code address(String address)} are considered as setter and {@code addName(String name)} too. We need to
* make sure that {@code addName} is considered as an adder and {@code address} is considered as a setter.
*
* @param method the method that needs to be checked
*
* @return {@code true} if the method is an adder with an upper case 4h character, {@code false} otherwise
*/
private boolean isAdderWithUpperCase4thCharacter(ExecutableElement method) {
return isAdderMethod( method ) && Character.isUpperCase( method.getSimpleName().toString().charAt( 3 ) );
}
/**
* Returns {@code true} when the {@link ExecutableElement} is an adder method. An adder method starts with 'add'.
* The remainder of the name is supposed to reflect the <em>singular</em> property name (as opposed to plural) of
* its corresponding property. For example: property "children", but "addChild". See also
* {@link #getElementName(ExecutableElement) }.
* <p>
* The calling MapStruct code guarantees there's only one argument.
* <p>
*
* @param method to be analyzed
*
* @return {@code true} when the method is an adder method.
*/
public boolean isAdderMethod(ExecutableElement method) {
String methodName = method.getSimpleName().toString();
return methodName.startsWith( "add" ) && methodName.length() > 3;
}
/**
* Returns {@code true} when the {@link ExecutableElement} is a <em>presence check</em> method that checks if the
* corresponding property is present (e.g. not null, not nil, ..). A presence check method method starts with
* 'has'. The remainder of the name is supposed to reflect the property name.
* <p>
* The calling MapStruct code guarantees there's no argument and that the return type is boolean or a
* {@link Boolean}
*
* @param method to be analyzed
* @return {@code true} when the method is a presence check method.
*/
public boolean isPresenceCheckMethod(ExecutableElement method) {
String methodName = method.getSimpleName().toString();
return methodName.startsWith( "has" ) && methodName.length() > 3;
}
/**
* Analyzes the method (getter or setter) and derives the property name.
* See {@link #isGetterMethod(ExecutableElement)} {@link #isSetterMethod(ExecutableElement)}. The first three
* ('get' / 'set' scenario) characters are removed from the simple name, or the first 2 characters ('is' scenario).
* From the remainder the first character is made into small case (to counter camel casing) and the result forms
* the property name.
*
* @param getterOrSetterMethod getter or setter method.
*
* @return the property name.
*/
@Override
public String getPropertyName(ExecutableElement getterOrSetterMethod) {
String methodName = getterOrSetterMethod.getSimpleName().toString();
if ( isFluentSetter( getterOrSetterMethod ) ) {
// If this is a fluent setter that starts with set and the 4th character is an uppercase one
// then we treat it as a Java Bean style method (we get the property starting from the 4th character).
// Otherwise we treat it as a fluent setter
// For example, for the following methods:
// * public Builder setSettlementDate(String settlementDate)
// * public Builder settlementDate(String settlementDate)
// We are going to extract the same property name settlementDate
if ( methodName.startsWith( "set" )
&& methodName.length() > 3
&& Character.isUpperCase( methodName.charAt( 3 ) ) ) {
return IntrospectorUtils.decapitalize( methodName.substring( 3 ) );
}
else {
return methodName;
}
}
return IntrospectorUtils.decapitalize( methodName.substring( methodName.startsWith( "is" ) ? 2 : 3 ) );
}
/**
* Adder methods are used to add elements to collections on a target bean. A typical use case is JPA. The
* convention is that the element name will be equal to the remainder of the add method. Example: 'addElement'
* element name will be 'element'.
*
* @param adderMethod getter or setter method.
*
* @return the property name.
*/
@Override
public String getElementName(ExecutableElement adderMethod) {
String methodName = adderMethod.getSimpleName().toString();
return IntrospectorUtils.decapitalize( methodName.substring( 3 ) );
}
/**
* Helper method, to obtain the fully qualified name of a type.
*
* @param type input type
*
* @return fully qualified name of type when the type is a {@link DeclaredType}, null when otherwise.
*/
protected static String getQualifiedName(TypeMirror type) {
DeclaredType declaredType = type.accept(
new SimpleTypeVisitor6<DeclaredType, Void>() {
@Override
public DeclaredType visitDeclared(DeclaredType t, Void p) {
return t;
}
},
null
);
if ( declaredType == null ) {
return null;
}
TypeElement typeElement = declaredType.asElement().accept(
new SimpleElementVisitor6<TypeElement, Void>() {
@Override
public TypeElement visitType(TypeElement e, Void p) {
return e;
}
},
null
);
return typeElement != null ? typeElement.getQualifiedName().toString() : null;
}
@Override
public String getCollectionGetterName(String property) {
throw new IllegalStateException( "This method is not intended to be called anymore and will be removed in "
+ "future versions." );
}
}
| DefaultAccessorNamingStrategy |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/server/standard/ServerEndpointRegistrationTests.java | {
"start": 2188,
"end": 2295
} | class ____ {
@Bean
EchoService echoService() {
return new EchoService();
}
}
private static | Config |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/howto/dataaccess/configurehibernatenamingstrategy/standard/MyHibernateConfiguration.java | {
"start": 956,
"end": 1130
} | class ____ {
@Bean
PhysicalNamingStrategyStandardImpl caseSensitivePhysicalNamingStrategy() {
return new PhysicalNamingStrategyStandardImpl();
}
}
| MyHibernateConfiguration |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/core/Single.java | {
"start": 200703,
"end": 201454
} | class ____<T> implements SingleObserver<T>, Disposable {
*
* // The downstream's SingleObserver that will receive the onXXX events
* final SingleObserver<? super String> downstream;
*
* // The connection to the upstream source that will call this class' onXXX methods
* Disposable upstream;
*
* // The constructor takes the downstream subscriber and usually any other parameters
* public CustomSingleObserver(SingleObserver<? super String> downstream) {
* this.downstream = downstream;
* }
*
* // In the subscription phase, the upstream sends a Disposable to this class
* // and subsequently this | CustomSingleObserver |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/test/fakedns/FakeDNSServer.java | {
"start": 15535,
"end": 16258
} | class ____ implements ProtocolCodecFactory {
@Override
public ProtocolEncoder getEncoder(IoSession session) throws Exception {
return new DnsUdpEncoder() {
@Override
public void encode(IoSession session, Object message, ProtocolEncoderOutput out) {
IoBuffer buf = IoBuffer.allocate( 1024 );
FakeDNSServer.this.encode((DnsMessage)message, buf);
buf.flip();
out.write( buf );
}
};
}
@Override
public ProtocolDecoder getDecoder(IoSession session) throws Exception {
return new DnsUdpDecoder();
}
}
/**
* ProtocolCodecFactory which allows to test AAAA resolution
*/
private final | TestDnsProtocolUdpCodecFactory |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java | {
"start": 617,
"end": 2597
} | class ____ extends InferenceConfigItemTestCase<PassThroughConfig> {
public static PassThroughConfig mutateForVersion(PassThroughConfig instance, TransportVersion version) {
return new PassThroughConfig(
instance.getVocabularyConfig(),
InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version),
instance.getResultsField()
);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> field.isEmpty() == false;
}
@Override
protected PassThroughConfig doParseInstance(XContentParser parser) throws IOException {
return PassThroughConfig.fromXContentLenient(parser);
}
@Override
protected Writeable.Reader<PassThroughConfig> instanceReader() {
return PassThroughConfig::new;
}
@Override
protected PassThroughConfig createTestInstance() {
return createRandom();
}
@Override
protected PassThroughConfig mutateInstance(PassThroughConfig instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected PassThroughConfig mutateInstanceForVersion(PassThroughConfig instance, TransportVersion version) {
return mutateForVersion(instance, version);
}
public static PassThroughConfig createRandom() {
return new PassThroughConfig(
randomBoolean() ? null : VocabularyConfigTests.createRandom(),
randomBoolean()
? null
: randomFrom(
BertTokenizationTests.createRandom(),
MPNetTokenizationTests.createRandom(),
RobertaTokenizationTests.createRandom()
),
randomBoolean() ? null : randomAlphaOfLength(7)
);
}
}
| PassThroughConfigTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java | {
"start": 5061,
"end": 23846
} | class ____ implements
Iterator<FsVolumeSpi> {
private final List<FsVolumeReference> references;
private int idx = 0;
FsVolumeSpiIterator(List<FsVolumeReference> refs) {
references = refs;
}
@Override
public boolean hasNext() {
return idx < references.size();
}
@Override
public FsVolumeSpi next() {
int refIdx = idx++;
return references.get(refIdx).getVolume();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
@Override
public Iterator<FsVolumeSpi> iterator() {
return new FsVolumeSpiIterator(references);
}
/**
* Get the number of volumes.
*/
public int size() {
return references.size();
}
/**
* Get the volume for a given index.
*/
public FsVolumeSpi get(int index) {
return references.get(index).getVolume();
}
/**
* Get the reference for a given index.
*/
public FsVolumeReference getReference(int index) {
return references.get(index);
}
@Override
public void close() throws IOException {
IOException ioe = null;
for (FsVolumeReference ref : references) {
try {
ref.close();
} catch (IOException e) {
ioe = e;
}
}
references.clear();
if (ioe != null) {
throw ioe;
}
}
}
/**
* Returns a list of FsVolumes that hold reference counts.
*
* The caller must release the reference of each volume by calling
* {@link FsVolumeReferences#close()}.
*/
FsVolumeReferences getFsVolumeReferences();
/**
* Add a new volume to the FsDataset.
*
* If the FSDataset supports block scanning, this function registers
* the new volume with the block scanner.
*
* @param location The storage location for the new volume.
* @param nsInfos Namespace information for the new volume.
*/
void addVolume(
final StorageLocation location,
final List<NamespaceInfo> nsInfos) throws IOException;
/**
* Removes a collection of volumes from FsDataset.
*
* If the FSDataset supports block scanning, this function removes
* the volumes from the block scanner.
*
* @param volumes The paths of the volumes to be removed.
* @param clearFailure set true to clear the failure information about the
* volumes.
*/
void removeVolumes(Collection<StorageLocation> volumes, boolean clearFailure);
/** @return a storage with the given storage ID */
DatanodeStorage getStorage(final String storageUuid);
/** @return one or more storage reports for attached volumes. */
StorageReport[] getStorageReports(String bpid)
throws IOException;
/** @return the volume that contains a replica of the block. */
V getVolume(ExtendedBlock b);
/** @return a volume information map (name {@literal =>} info). */
Map<String, Object> getVolumeInfoMap();
/**
* Returns info about volume failures.
*
* @return info about volume failures, possibly null
*/
VolumeFailureSummary getVolumeFailureSummary();
/**
* Gets a list of references to the finalized blocks for the given block pool.
* <p>
* Callers of this function should call
* {@link FsDatasetSpi#acquireDatasetLockManager} to avoid blocks' status being
* changed during list iteration.
* </p>
* @return a list of references to the finalized blocks for the given block
* pool.
*/
List<ReplicaInfo> getFinalizedBlocks(String bpid);
/**
* Check whether the in-memory block record matches the block on the disk,
* and, in case that they are not matched, update the record or mark it
* as corrupted.
*/
void checkAndUpdate(String bpid, ScanInfo info) throws IOException;
/**
* @param b - the block
* @return a stream if the meta-data of the block exists;
* otherwise, return null.
* @throws IOException
*/
LengthInputStream getMetaDataInputStream(ExtendedBlock b
) throws IOException;
/**
* Returns the specified block's on-disk length (excluding metadata).
* @return the specified block's on-disk length (excluding metadta)
* @throws IOException on error
*/
long getLength(ExtendedBlock b) throws IOException;
/**
* Get reference to the replica meta info in the replicasMap.
* To be called from methods that are synchronized on
* implementations of {@link FsDatasetSpi}
* @return replica from the replicas map
*/
@Deprecated
Replica getReplica(String bpid, long blockId);
/**
* @return replica meta information
*/
String getReplicaString(String bpid, long blockId);
/**
* @return the generation stamp stored with the block.
*/
Block getStoredBlock(String bpid, long blkid) throws IOException;
/**
* Returns an input stream at specified offset of the specified block.
* @param b block
* @param seekOffset offset with in the block to seek to
* @return an input stream to read the contents of the specified block,
* starting at the offset
* @throws IOException
*/
InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
throws IOException;
/**
* Returns an input stream at specified offset of the specified block.
* The block is still in the tmp directory and is not finalized
* @return an input stream to read the contents of the specified block,
* starting at the offset
* @throws IOException
*/
ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException;
/**
* Creates a temporary replica and returns the meta information of the replica
* .
*
* @param b block
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler createTemporary(StorageType storageType, String storageId,
ExtendedBlock b, boolean isTransfer) throws IOException;
/**
* Creates a RBW replica and returns the meta info of the replica
*
* @param b block
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler createRbw(StorageType storageType, String storageId,
ExtendedBlock b, boolean allowLazyPersist) throws IOException;
/**
* Creates a RBW replica and returns the meta info of the replica
*
* @param b block
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler createRbw(StorageType storageType, String storageId,
ExtendedBlock b, boolean allowLazyPersist, long newGS) throws IOException;
/**
* Recovers a RBW replica and returns the meta info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param minBytesRcvd the minimum number of bytes that the replica could have
* @param maxBytesRcvd the maximum number of bytes that the replica could have
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException;
/**
* Covert a temporary replica to a RBW.
* @param temporary the temporary replica being converted
* @return the result RBW
*/
ReplicaInPipeline convertTemporaryToRbw(
ExtendedBlock temporary) throws IOException;
/**
* Append to a finalized replica and returns the meta info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param expectedBlockLen the number of bytes the replica is expected to have
* @return the meata info of the replica which is being written to
* @throws IOException
*/
ReplicaHandler append(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException;
/**
* Recover a failed append to a finalized replica and returns the meta
* info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param expectedBlockLen the number of bytes the replica is expected to have
* @return the meta info of the replica which is being written to
* @throws IOException
*/
ReplicaHandler recoverAppend(
ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException;
/**
* Recover a failed pipeline close.
* It bumps the replica's generation stamp and finalize it if RBW replica
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param expectedBlockLen the number of bytes the replica is expected to have
* @return the storage uuid of the replica.
* @throws IOException
*/
Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
) throws IOException;
/**
* Finalizes the block previously opened for writing using writeToBlock.
* The block size is what is in the parameter b and it must match the amount
* of data written
* @param b Block to be finalized
* @param fsyncDir whether to sync the directory changes to durable device.
* @throws IOException
* @throws ReplicaNotFoundException if the replica can not be found when the
* block is been finalized. For instance, the block resides on an HDFS volume
* that has been removed.
*/
void finalizeBlock(ExtendedBlock b, boolean fsyncDir) throws IOException;
/**
* Unfinalizes the block previously opened for writing using writeToBlock.
* The temporary file associated with this block is deleted.
* @throws IOException
*/
void unfinalizeBlock(ExtendedBlock b) throws IOException;
/**
* Returns one block report per volume.
* @param bpid Block Pool Id
* @return - a map of DatanodeStorage to block report for the volume.
*/
Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
/**
* Returns the cache report - the full list of cached block IDs of a
* block pool.
* @param bpid Block Pool Id
* @return the cache report - the full list of cached block IDs.
*/
List<Long> getCacheReport(String bpid);
/** Does the dataset contain the block? */
boolean contains(ExtendedBlock block);
/**
* Check if a block is valid.
*
* @param b The block to check.
* @param minLength The minimum length that the block must have. May be 0.
* @param state If this is null, it is ignored. If it is non-null, we
* will check that the replica has this state.
*
* @throws ReplicaNotFoundException If the replica is not found
*
* @throws UnexpectedReplicaStateException If the replica is not in the
* expected state.
* @throws FileNotFoundException If the block file is not found or there
* was an error locating it.
* @throws EOFException If the replica length is too short.
*
* @throws IOException May be thrown from the methods called.
*/
void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
throws ReplicaNotFoundException, UnexpectedReplicaStateException,
FileNotFoundException, EOFException, IOException;
/**
* Is the block valid?
* @return - true if the specified block is valid
*/
boolean isValidBlock(ExtendedBlock b);
/**
* Is the block a valid RBW?
* @return - true if the specified block is a valid RBW
*/
boolean isValidRbw(ExtendedBlock b);
/**
* Invalidates the specified blocks.
* @param bpid Block pool Id
* @param invalidBlks - the blocks to be invalidated
* @throws IOException
*/
void invalidate(String bpid, Block invalidBlks[]) throws IOException;
/**
* Invalidate a block which is not found on disk.
* @param bpid the block pool ID.
* @param block The block to be invalidated.
*/
void invalidateMissingBlock(String bpid, Block block) throws IOException;
/**
* Caches the specified block
* @param bpid Block pool id
* @param blockIds - block ids to cache
*/
void cache(String bpid, long[] blockIds);
/**
* Uncaches the specified blocks
* @param bpid Block pool id
* @param blockIds - blocks ids to uncache
*/
void uncache(String bpid, long[] blockIds);
/**
* Determine if the specified block is cached.
* @param bpid Block pool id
* @param blockId - block id
* @return true if the block is cached
*/
boolean isCached(String bpid, long blockId);
/**
* Check if all the data directories are healthy
* @param failedVolumes
*/
void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes);
/**
* Shutdown the FSDataset
*/
void shutdown();
/**
* Sets the file pointer of the checksum stream so that the last checksum
* will be overwritten
* @param b block
* @param outs The streams for the data file and checksum file
* @param checksumSize number of bytes each checksum has
* @throws IOException
*/
void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams outs, int checksumSize) throws IOException;
/**
* Checks how many valid storage volumes there are in the DataNode.
* @return true if more than the minimum number of valid volumes are left
* in the FSDataSet.
*/
boolean hasEnoughResource();
/**
* Get visible length of the specified replica.
*/
long getReplicaVisibleLength(final ExtendedBlock block) throws IOException;
/**
* Initialize a replica recovery.
* @return actual state of the replica on this data-node or
* null if data-node does not have the replica.
*/
ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock
) throws IOException;
/**
* Update replica's generation stamp and length and finalize it.
* @return the ID of storage that stores the block
*/
Replica updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId, long newBlockId, long newLength) throws IOException;
/**
* add new block pool ID
* @param bpid Block pool Id
* @param conf Configuration
*/
void addBlockPool(String bpid, Configuration conf) throws IOException;
/**
* Shutdown and remove the block pool from underlying storage.
* @param bpid Block pool Id to be removed
*/
void shutdownBlockPool(String bpid) ;
/**
* Deletes the block pool directories. If force is false, directories are
* deleted only if no block files exist for the block pool. If force
* is true entire directory for the blockpool is deleted along with its
* contents.
* @param bpid BlockPool Id to be deleted.
* @param force If force is false, directories are deleted only if no
* block files exist for the block pool, otherwise entire
* directory for the blockpool is deleted along with its contents.
* @throws IOException
*/
void deleteBlockPool(String bpid, boolean force) throws IOException;
/**
* Get {@link BlockLocalPathInfo} for the given block.
*/
BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b
) throws IOException;
/**
* Enable 'trash' for the given dataset. When trash is enabled, files are
* moved to a separate trash directory instead of being deleted immediately.
* This can be useful for example during rolling upgrades.
*/
void enableTrash(String bpid);
/**
* Clear trash
*/
void clearTrash(String bpid);
/**
* @return true when trash is enabled
*/
boolean trashEnabled(String bpid);
/**
* Create a marker file indicating that a rolling upgrade is in progress.
*/
void setRollingUpgradeMarker(String bpid) throws IOException;
/**
* Delete the rolling upgrade marker file if it exists.
* @param bpid
*/
void clearRollingUpgradeMarker(String bpid) throws IOException;
/**
* submit a sync_file_range request to AsyncDiskService.
*/
void submitBackgroundSyncFileRangeRequest(final ExtendedBlock block,
final ReplicaOutputStreams outs, final long offset, final long nbytes,
final int flags);
/**
* Callback from RamDiskAsyncLazyPersistService upon async lazy persist task end
*/
void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, V targetVolume);
/**
* Callback from RamDiskAsyncLazyPersistService upon async lazy persist task fail
*/
void onFailLazyPersist(String bpId, long blockId);
/**
* Move block from one storage to another storage
*/
ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
StorageType targetStorageType, String storageId) throws IOException;
/**
* Set a block to be pinned on this datanode so that it cannot be moved
* by Balancer/Mover.
*
* It is a no-op when dfs.datanode.block-pinning.enabled is set to false.
*/
void setPinning(ExtendedBlock block) throws IOException;
/**
* Check whether the block was pinned
*/
boolean getPinning(ExtendedBlock block) throws IOException;
/**
* Confirm whether the block is deleting
*/
boolean isDeletingBlock(String bpid, long blockId);
/**
* Moves a given block from one volume to another volume. This is used by disk
* balancer.
*
* @param block - ExtendedBlock
* @param destination - Destination volume
* @return Old replica info
*/
ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
FsVolumeSpi destination) throws IOException;
/***
* Acquire lock Manager for the data set. This prevents other threads from
* modifying the volume map structure inside the datanode.
* @return The AutoClosable read lock instance.
*/
DataNodeLockManager<? extends AutoCloseDataSetLock> acquireDatasetLockManager();
/**
* Deep copy the replica info belonging to given block pool.
* @param bpid Specified block pool id.
* @return A set of replica info.
* @throws IOException
*/
Set<? extends Replica> deepCopyReplica(String bpid) throws IOException;
/**
* Get relationship between disk mount and FsVolume.
* @return Disk mount and FsVolume relationship.
* @throws IOException
*/
MountVolumeMap getMountVolumeMap() throws IOException;
/**
* Get the volume list.
*/
List<FsVolumeImpl> getVolumeList();
/**
* Set the last time in milliseconds when the directory scanner successfully ran.
* @param time the last time in milliseconds when the directory scanner successfully ran.
*/
default void setLastDirScannerFinishTime(long time) {}
}
| FsVolumeSpiIterator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.