language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/PatternRuleTests.java | {
"start": 567,
"end": 3133
} | class ____ extends ESTestCase {
public void testSingleIpRule() throws UnknownHostException {
PatternRule rule = new PatternRule(IpFilterRuleType.REJECT, "i:127.0.0.1");
assertFalse(rule.isLocalhost());
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0)));
assertEquals(IpFilterRuleType.REJECT, rule.ruleType());
rule = new PatternRule(IpFilterRuleType.REJECT, "i:192.168.*");
assertFalse(rule.isLocalhost());
assertFalse(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0)));
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.2.1"), 0)));
assertEquals(IpFilterRuleType.REJECT, rule.ruleType());
}
public void testSingleLocalHostRule() throws UnknownHostException {
PatternRule rule = new PatternRule(IpFilterRuleType.ACCEPT, "n:localhost");
assertTrue(rule.isLocalhost());
assertTrue(rule.matches(new InetSocketAddress(getLocalHost(), 0)));
assertEquals(IpFilterRuleType.ACCEPT, rule.ruleType());
}
public void testMultiRules() throws UnknownHostException {
PatternRule rule = new PatternRule(IpFilterRuleType.ACCEPT, "n:localhost,i:127.0.0.1,i:192.168.9.*");
assertTrue(rule.isLocalhost());
assertTrue(rule.matches(new InetSocketAddress(getLocalHost(), 0)));
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.9.1"), 0)));
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0)));
assertFalse(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.11.1"), 0)));
assertEquals(IpFilterRuleType.ACCEPT, rule.ruleType());
}
public void testAll() throws UnknownHostException {
PatternRule rule = new PatternRule(IpFilterRuleType.ACCEPT, "n:*");
assertFalse(rule.isLocalhost());
assertTrue(rule.matches(new InetSocketAddress(getLocalHost(), 0)));
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.9.1"), 0)));
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0)));
assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.11.1"), 0)));
assertEquals(IpFilterRuleType.ACCEPT, rule.ruleType());
}
@SuppressForbidden(reason = "just for this test")
private static InetAddress getLocalHost() throws UnknownHostException {
return InetAddress.getLocalHost();
}
}
| PatternRuleTests |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlConfiguration.java | {
"start": 789,
"end": 3962
} | class ____ extends org.elasticsearch.xpack.ql.session.Configuration {
@Nullable
private final String catalog;
private final int pageSize;
private final TimeValue requestTimeout;
private final TimeValue pageTimeout;
private final Mode mode;
private final String clientId;
private final SqlVersion version;
private final boolean multiValueFieldLeniency;
private final boolean includeFrozenIndices;
@Nullable
private final TaskId taskId;
@Nullable
private final SqlQueryTask task;
@Nullable
private final QueryBuilder filter;
@Nullable
private final Map<String, Object> runtimeMappings;
private final boolean allowPartialSearchResults;
private final String projectRouting;
public SqlConfiguration(
ZoneId zi,
@Nullable String catalog,
int pageSize,
TimeValue requestTimeout,
TimeValue pageTimeout,
QueryBuilder filter,
Map<String, Object> runtimeMappings,
Mode mode,
String clientId,
SqlVersion version,
String username,
String clusterName,
boolean multiValueFieldLeniency,
boolean includeFrozen,
@Nullable TaskId taskId,
@Nullable SqlQueryTask task,
boolean allowPartialSearchResults,
String projectRouting
) {
super(zi, username, clusterName);
this.catalog = catalog;
this.pageSize = pageSize;
this.requestTimeout = requestTimeout;
this.pageTimeout = pageTimeout;
this.filter = filter;
this.runtimeMappings = runtimeMappings;
this.mode = mode == null ? Mode.PLAIN : mode;
this.clientId = clientId;
this.version = version != null ? version : SqlVersions.SERVER_COMPAT_VERSION;
this.multiValueFieldLeniency = multiValueFieldLeniency;
this.includeFrozenIndices = includeFrozen;
this.taskId = taskId;
this.task = task;
this.allowPartialSearchResults = allowPartialSearchResults;
this.projectRouting = projectRouting;
}
public String catalog() {
return catalog;
}
public int pageSize() {
return pageSize;
}
public TimeValue requestTimeout() {
return requestTimeout;
}
public TimeValue pageTimeout() {
return pageTimeout;
}
public QueryBuilder filter() {
return filter;
}
public Map<String, Object> runtimeMappings() {
return runtimeMappings;
}
public Mode mode() {
return mode;
}
public String clientId() {
return clientId;
}
public boolean multiValueFieldLeniency() {
return multiValueFieldLeniency;
}
public boolean includeFrozen() {
return includeFrozenIndices;
}
public SqlVersion version() {
return version;
}
public TaskId taskId() {
return taskId;
}
public SqlQueryTask task() {
return task;
}
public boolean allowPartialSearchResults() {
return allowPartialSearchResults;
}
public String projectRouting() {
return projectRouting;
}
}
| SqlConfiguration |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceIntersection.java | {
"start": 729,
"end": 2140
} | class ____ {
private final List<RoleReference> roleReferences;
public RoleReferenceIntersection(RoleReference... roleReferences) {
this(List.of(roleReferences));
}
public RoleReferenceIntersection(List<RoleReference> roleReferences) {
assert roleReferences != null && false == roleReferences.isEmpty() : "role references cannot be null or empty";
this.roleReferences = Objects.requireNonNull(roleReferences);
}
public List<RoleReference> getRoleReferences() {
return roleReferences;
}
public void buildRole(BiConsumer<RoleReference, ActionListener<Role>> singleRoleBuilder, ActionListener<Role> roleActionListener) {
final GroupedActionListener<Role> roleGroupedActionListener = new GroupedActionListener<>(
roleReferences.size(),
roleActionListener.delegateFailureAndWrap((l, roles) -> {
assert false == roles.isEmpty();
final Iterator<Role> iterator = roles.stream().iterator();
Role finalRole = iterator.next();
while (iterator.hasNext()) {
finalRole = finalRole.limitedBy(iterator.next());
}
l.onResponse(finalRole);
})
);
roleReferences.forEach(roleReference -> singleRoleBuilder.accept(roleReference, roleGroupedActionListener));
}
}
| RoleReferenceIntersection |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/QueryTokenStream.java | {
"start": 1269,
"end": 8087
} | interface ____ extends Streamable<QueryToken> {
/**
* Creates an empty stream.
*/
static QueryTokenStream empty() {
return EmptyQueryTokenStream.INSTANCE;
}
/**
* Compose a {@link QueryTokenStream} from a collection of inline elements.
*
* @param elements collection of elements.
* @param visitor visitor function converting the element into a {@link QueryTokenStream}.
* @param separator separator token.
* @return the composed token stream.
*/
static <T> QueryTokenStream concat(Collection<T> elements, Function<T, QueryTokenStream> visitor,
QueryToken separator) {
return concat(elements, visitor, QueryRenderer::inline, separator);
}
/**
* Compose a {@link QueryTokenStream} from a collection of expression elements.
*
* @param elements collection of elements.
* @param visitor visitor function converting the element into a {@link QueryTokenStream}.
* @param separator separator token.
* @return the composed token stream.
*/
static <T> QueryTokenStream concatExpressions(Collection<T> elements, Function<T, QueryTokenStream> visitor,
QueryToken separator) {
return concat(elements, visitor, QueryRenderer::ofExpression, separator);
}
/**
* Compose a {@link QueryTokenStream} from a collection of elements. Expressions are rendered using space separators.
*
* @param elements collection of elements.
* @param visitor visitor function converting the element into a {@link QueryTokenStream}.
* @return the composed token stream.
* @since 4.0
*/
static <T> QueryTokenStream concatExpressions(Collection<T> elements, Function<T, QueryTokenStream> visitor) {
if (CollectionUtils.isEmpty(elements)) {
return QueryTokenStream.empty();
}
QueryRenderer.QueryRendererBuilder builder = QueryRenderer.builder();
for (T child : elements) {
if (child instanceof TerminalNode tn) {
builder.append(QueryTokens.expression(tn));
} else {
builder.appendExpression(visitor.apply(child));
}
}
return builder.build();
}
/**
* Compose a {@link QueryTokenStream} from a collection of expressions from a {@link Tree}. Expressions are rendered
* using space separators.
*
* @param elements collection of elements.
* @param visitor visitor function converting the element into a {@link QueryTokenStream}.
* @return the composed token stream.
* @since 4.0
*/
static QueryTokenStream concatExpressions(Tree elements, Function<? super ParseTree, QueryTokenStream> visitor) {
int childCount = elements.getChildCount();
if (childCount == 0) {
return QueryTokenStream.empty();
}
QueryRenderer.QueryRendererBuilder builder = QueryRenderer.builder();
for (int i = 0; i < childCount; i++) {
Tree child = elements.getChild(i);
if (child instanceof TerminalNode tn) {
builder.append(QueryTokens.expression(tn));
} else if (child instanceof ParseTree pt) {
builder.appendExpression(visitor.apply(pt));
} else {
throw new IllegalArgumentException("Unsupported child type: " + child);
}
}
return builder.build();
}
/**
* Compose a {@link QueryTokenStream} from a collection of elements.
*
* @param elements collection of elements.
* @param visitor visitor function converting the element into a {@link QueryTokenStream}.
* @param separator separator token.
* @param postProcess post-processing function to map {@link QueryTokenStream}.
* @return the composed token stream.
*/
static <T> QueryTokenStream concat(Collection<T> elements, Function<T, QueryTokenStream> visitor,
Function<QueryTokenStream, QueryTokenStream> postProcess, QueryToken separator) {
QueryRenderer.QueryRendererBuilder builder = null;
QueryTokenStream firstElement = null;
for (T element : elements) {
QueryTokenStream tokenStream = postProcess.apply(visitor.apply(element));
if (firstElement == null) {
firstElement = tokenStream;
continue;
}
if (builder == null) {
builder = QueryRenderer.builder();
builder.append(firstElement);
}
if (!builder.isEmpty()) {
builder.append(separator);
}
builder.append(tokenStream);
}
if (builder != null) {
return builder;
}
if (firstElement != null) {
return firstElement;
}
return QueryTokenStream.empty();
}
/**
* Creates a {@link QueryTokenStream} that groups the given {@link QueryTokenStream nested token stream} in
* parentheses ({@code (…)}).
*
* @param nested the nested token stream to wrap in parentheses.
* @return a {@link QueryTokenStream} that groups the given {@link QueryTokenStream nested token stream} in
* parentheses.
* @since 5.0
*/
static QueryTokenStream group(QueryTokenStream nested) {
QueryRenderer.QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(nested);
builder.append(TOKEN_CLOSE_PAREN);
return builder.build();
}
/**
* Creates a {@link QueryTokenStream} representing a function call including arguments wrapped in parentheses.
*
* @param functionName function name.
* @param arguments the arguments of the function call.
* @return a {@link QueryTokenStream} representing a function call.
* @since 5.0
*/
static QueryTokenStream ofFunction(TerminalNode functionName, QueryTokenStream arguments) {
QueryRenderer.QueryRendererBuilder builder = QueryRenderer.builder();
builder.append(QueryTokens.token(functionName));
builder.append(TOKEN_OPEN_PAREN);
builder.appendInline(arguments);
builder.append(TOKEN_CLOSE_PAREN);
return builder.build();
}
/**
* @return the first query token or {@code null} if empty.
*/
default @Nullable QueryToken getFirst() {
Iterator<QueryToken> it = iterator();
return it.hasNext() ? it.next() : null;
}
/**
* @return the required first query token or throw {@link java.util.NoSuchElementException} if empty.
* @since 4.0
*/
default QueryToken getRequiredFirst() {
QueryToken first = getFirst();
if (first == null) {
throw new NoSuchElementException("No token in the stream");
}
return first;
}
/**
* @return the last query token or {@code null} if empty.
*/
default @Nullable QueryToken getLast() {
return CollectionUtils.lastElement(toList());
}
/**
* @return the required last query token or throw {@link java.util.NoSuchElementException} if empty.
* @since 4.0
*/
default QueryToken getRequiredLast() {
QueryToken last = getLast();
if (last == null) {
throw new NoSuchElementException("No token in the stream");
}
return last;
}
/**
* @return {@code true} if this stream represents a query expression.
*/
boolean isExpression();
/**
* @return the number of tokens.
*/
int size();
/**
* @return {@code true} if this stream contains no tokens.
*/
boolean isEmpty();
}
| QueryTokenStream |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java | {
"start": 1855,
"end": 22755
} | class ____ implements Iterable<DiscoveryNode>, SimpleDiffable<DiscoveryNodes> {
public static final DiscoveryNodes EMPTY_NODES = builder().build();
private final long nodeLeftGeneration;
private final Map<String, DiscoveryNode> nodes;
private final Map<String, DiscoveryNode> dataNodes;
private final Map<String, DiscoveryNode> masterNodes;
private final Map<String, DiscoveryNode> ingestNodes;
@Nullable
private final String masterNodeId;
@Nullable
private final DiscoveryNode masterNode;
@Nullable
private final String localNodeId;
@Nullable
private final DiscoveryNode localNode;
private final Version maxNodeVersion;
private final Version minNodeVersion;
private final IndexVersion maxDataNodeCompatibleIndexVersion;
private final IndexVersion minSupportedIndexVersion;
private final IndexVersion minReadOnlySupportedIndexVersion;
private final Map<String, Set<String>> tiersToNodeIds;
private DiscoveryNodes(
long nodeLeftGeneration,
Map<String, DiscoveryNode> nodes,
Map<String, DiscoveryNode> dataNodes,
Map<String, DiscoveryNode> masterNodes,
Map<String, DiscoveryNode> ingestNodes,
@Nullable String masterNodeId,
@Nullable String localNodeId,
Version maxNodeVersion,
Version minNodeVersion,
IndexVersion maxDataNodeCompatibleIndexVersion,
IndexVersion minSupportedIndexVersion,
IndexVersion minReadOnlySupportedIndexVersion,
Map<String, Set<String>> tiersToNodeIds
) {
this.nodeLeftGeneration = nodeLeftGeneration;
this.nodes = nodes;
this.dataNodes = dataNodes;
this.masterNodes = masterNodes;
this.ingestNodes = ingestNodes;
this.masterNodeId = masterNodeId;
this.masterNode = masterNodeId == null ? null : nodes.get(masterNodeId);
assert (masterNodeId == null) == (masterNode == null);
this.localNodeId = localNodeId;
this.localNode = localNodeId == null ? null : nodes.get(localNodeId);
this.minNodeVersion = minNodeVersion;
this.maxNodeVersion = maxNodeVersion;
this.maxDataNodeCompatibleIndexVersion = maxDataNodeCompatibleIndexVersion;
this.minSupportedIndexVersion = minSupportedIndexVersion;
this.minReadOnlySupportedIndexVersion = minReadOnlySupportedIndexVersion;
assert minReadOnlySupportedIndexVersion.onOrBefore(minSupportedIndexVersion);
assert (localNodeId == null) == (localNode == null);
this.tiersToNodeIds = tiersToNodeIds;
}
public DiscoveryNodes withMasterNodeId(@Nullable String masterNodeId) {
assert masterNodeId == null || nodes.containsKey(masterNodeId) : "unknown node [" + masterNodeId + "]";
return new DiscoveryNodes(
nodeLeftGeneration,
nodes,
dataNodes,
masterNodes,
ingestNodes,
masterNodeId,
localNodeId,
maxNodeVersion,
minNodeVersion,
maxDataNodeCompatibleIndexVersion,
minSupportedIndexVersion,
minReadOnlySupportedIndexVersion,
tiersToNodeIds
);
}
@Override
public Iterator<DiscoveryNode> iterator() {
return nodes.values().iterator();
}
public Stream<DiscoveryNode> stream() {
return nodes.values().stream();
}
public Collection<DiscoveryNode> getAllNodes() {
return nodes.values();
}
public int size() {
return nodes.size();
}
/**
* Returns {@code true} if the local node is the elected master node.
*/
public boolean isLocalNodeElectedMaster() {
if (localNodeId == null) {
// we don't know yet the local node id, return false
return false;
}
return localNodeId.equals(masterNodeId);
}
/**
* Gets a {@link Map} of node roles to node IDs which have those roles.
*
* @return {@link Map} of node roles to node IDs which have those roles.
*/
public Map<String, Set<String>> getTiersToNodeIds() {
return tiersToNodeIds;
}
/**
* Get the number of known nodes
*
* @return number of nodes
*/
public int getSize() {
return nodes.size();
}
/**
* Get a {@link Map} of the discovered nodes arranged by their ids
*
* @return {@link Map} of the discovered nodes arranged by their ids
*/
public Map<String, DiscoveryNode> getNodes() {
return this.nodes;
}
/**
* Get a {@link Map} of the discovered data nodes arranged by their ids
*
* @return {@link Map} of the discovered data nodes arranged by their ids
*/
public Map<String, DiscoveryNode> getDataNodes() {
return this.dataNodes;
}
/**
* Get a {@link Map} of the discovered master nodes arranged by their ids
*
* @return {@link Map} of the discovered master nodes arranged by their ids
*/
public Map<String, DiscoveryNode> getMasterNodes() {
return this.masterNodes;
}
/**
* @return All the ingest nodes arranged by their ids
*/
public Map<String, DiscoveryNode> getIngestNodes() {
return ingestNodes;
}
/**
* Get a {@link Map} of the discovered master and data nodes arranged by their ids
*
* @return {@link Map} of the discovered master and data nodes arranged by their ids
*/
public Map<String, DiscoveryNode> getMasterAndDataNodes() {
return filteredNodes(nodes, n -> n.canContainData() || n.isMasterNode());
}
/**
* Get a {@link Map} of the coordinating only nodes (nodes which are neither master, nor data, nor ingest nodes) arranged by their ids
*
* @return {@link Map} of the coordinating only nodes arranged by their ids
*/
public Map<String, DiscoveryNode> getCoordinatingOnlyNodes() {
return filteredNodes(nodes, n -> n.canContainData() == false && n.isMasterNode() == false && n.isIngestNode() == false);
}
private static final Comparator<DiscoveryNode> MASTERS_FIRST_COMPARATOR
// Ugly hack: when https://github.com/elastic/elasticsearch/issues/94946 is fixed, remove the sorting by ephemeral ID here
= Comparator.<DiscoveryNode>comparingInt(n -> n.isMasterNode() ? 0 : 1).thenComparing(DiscoveryNode::getEphemeralId);
/**
* Returns a stream of all nodes, with master nodes at the front
*/
public Stream<DiscoveryNode> mastersFirstStream() {
return nodes.values().stream().sorted(MASTERS_FIRST_COMPARATOR);
}
/**
* Get a node by its id
*
* @param nodeId id of the wanted node
* @return wanted node if it exists. Otherwise <code>null</code>
*/
public DiscoveryNode get(String nodeId) {
return nodes.get(nodeId);
}
/**
* Determine if a given node id exists
*
* @param nodeId id of the node which existence should be verified
* @return <code>true</code> if the node exists. Otherwise <code>false</code>
*/
public boolean nodeExists(String nodeId) {
return nodes.containsKey(nodeId);
}
/**
* Determine if a given node exists
*
* @param node of the node which existence should be verified
* @return <code>true</code> if the node exists. Otherwise <code>false</code>
*/
public boolean nodeExists(DiscoveryNode node) {
DiscoveryNode existing = nodes.get(node.getId());
return existing != null && existing.equals(node);
}
/**
* Determine if the given node exists and has the right roles. Supported roles vary by version, and our local cluster state might
* have come via an older master, so the roles may differ even if the node is otherwise identical.
*/
public boolean nodeExistsWithSameRoles(DiscoveryNode discoveryNode) {
final DiscoveryNode existing = nodes.get(discoveryNode.getId());
return existing != null && existing.equals(discoveryNode) && existing.getRoles().equals(discoveryNode.getRoles());
}
/**
* Get the id of the master node
*
* @return id of the master
*/
public String getMasterNodeId() {
return this.masterNodeId;
}
/**
* Get the id of the local node
*
* @return id of the local node
*/
public String getLocalNodeId() {
return this.localNodeId;
}
/**
* Get the local node
*
* @return local node
*/
public DiscoveryNode getLocalNode() {
return localNode;
}
/**
* Returns the master node, or {@code null} if there is no master node
*/
@Nullable
public DiscoveryNode getMasterNode() {
return masterNode;
}
/**
* Get a node by its address
*
* @param address {@link TransportAddress} of the wanted node
* @return node identified by the given address or <code>null</code> if no such node exists
*/
public DiscoveryNode findByAddress(TransportAddress address) {
for (DiscoveryNode node : nodes.values()) {
if (node.getAddress().equals(address)) {
return node;
}
}
return null;
}
/**
* Check if a node with provided name exists
*
* @return {@code true} node identified with provided name exists or {@code false} otherwise
*/
public boolean hasByName(String name) {
for (DiscoveryNode node : nodes.values()) {
if (node.getName().equals(name)) {
return true;
}
}
return false;
}
/**
* {@code true} if this cluster consists of nodes with several release versions
*/
public boolean isMixedVersionCluster() {
return minNodeVersion.equals(maxNodeVersion) == false;
}
/**
* Returns the highest index version supported by all data nodes in the cluster
*/
public IndexVersion getMaxDataNodeCompatibleIndexVersion() {
return maxDataNodeCompatibleIndexVersion;
}
/**
* Returns the version of the node with the oldest version in the cluster.
*
* @return the oldest version in the cluster
*/
public Version getMinNodeVersion() {
return minNodeVersion;
}
/**
* Returns the version of the node with the youngest version in the cluster
*
* @return the youngest version in the cluster
*/
public Version getMaxNodeVersion() {
return maxNodeVersion;
}
/**
* Returns the minimum index version supported by all nodes in the cluster
*/
public IndexVersion getMinSupportedIndexVersion() {
return minSupportedIndexVersion;
}
/**
* Returns the minimum index version for read-only indices supported by all nodes in the cluster
*/
public IndexVersion getMinReadOnlySupportedIndexVersion() {
return minReadOnlySupportedIndexVersion;
}
/**
* Return the node-left generation, which is the number of times the cluster membership has been updated by removing one or more nodes.
* <p>
* Since node-left events are rare, nodes can use the fact that this value has not changed to very efficiently verify that they have not
* been removed from the cluster. If the node-left generation changes then that indicates <i>some</i> node has left the cluster, which
* triggers some more expensive checks to determine the new cluster membership.
* <p>
* Not tracked if the cluster has any nodes older than v8.9.0, in which case this method returns zero.
*/
public long getNodeLeftGeneration() {
return nodeLeftGeneration;
}
/**
* Resolve a node with a given id
*
* @param node id of the node to discover
* @return discovered node matching the given id
* @throws IllegalArgumentException if more than one node matches the request or no nodes have been resolved
*/
public DiscoveryNode resolveNode(String node) {
String[] resolvedNodeIds = resolveNodes(node);
if (resolvedNodeIds.length > 1) {
throw new IllegalArgumentException(
"resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node"
);
}
if (resolvedNodeIds.length == 0) {
throw new IllegalArgumentException("failed to resolve [" + node + "], no matching nodes");
}
return nodes.get(resolvedNodeIds[0]);
}
/**
* Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the
* user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details.
*
* Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node
* specification may either add or remove nodes. For instance:
*
* - _local, _master and _all respectively add to the subset the local node, the currently-elected master, and all the nodes
* - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match
* - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value
* - role:true adds to the subset all nodes with a matching role
* - role:false removes from the subset all nodes with a matching role.
*
* An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default.
*/
public String[] resolveNodes(String... nodes) {
if (nodes == null || nodes.length == 0) {
return stream().map(DiscoveryNode::getId).toArray(String[]::new);
} else {
Set<String> resolvedNodesIds = Sets.newHashSetWithExpectedSize(nodes.length);
for (String nodeId : nodes) {
if (nodeId == null) {
// don't silence the underlying issue, it is a bug, so lets fail if assertions are enabled
assert nodeId != null : "nodeId should not be null";
continue;
} else if (nodeId.equals("_local")) {
String localNodeId = getLocalNodeId();
if (localNodeId != null) {
resolvedNodesIds.add(localNodeId);
}
} else if (nodeId.equals("_master")) {
String masterNodeId = getMasterNodeId();
if (masterNodeId != null) {
resolvedNodesIds.add(masterNodeId);
}
} else if (nodeExists(nodeId)) {
resolvedNodesIds.add(nodeId);
} else {
for (DiscoveryNode node : this) {
if ("_all".equals(nodeId)
|| Regex.simpleMatch(nodeId, node.getName())
|| Regex.simpleMatch(nodeId, node.getHostAddress())
|| Regex.simpleMatch(nodeId, node.getHostName())) {
resolvedNodesIds.add(node.getId());
}
}
int index = nodeId.indexOf(':');
if (index != -1) {
String matchAttrName = nodeId.substring(0, index);
String matchAttrValue = nodeId.substring(index + 1);
if (DiscoveryNodeRole.roles().stream().map(DiscoveryNodeRole::roleName).anyMatch(s -> s.equals(matchAttrName))) {
final DiscoveryNodeRole role = DiscoveryNodeRole.getRoleFromRoleName(matchAttrName);
final Predicate<Set<DiscoveryNodeRole>> predicate;
if (role.equals(DiscoveryNodeRole.DATA_ROLE)) {
// if the node has *any* role that can contain data, then it matches the data attribute
predicate = s -> s.stream().anyMatch(DiscoveryNodeRole::canContainData);
} else if (role.canContainData()) {
// if the node has the matching data_ role, or the generic data role, then it matches the data_ attribute
predicate = s -> s.stream().anyMatch(r -> r.equals(role) || r.equals(DiscoveryNodeRole.DATA_ROLE));
} else {
// the role is not a data role, we require an exact match (e.g., ingest)
predicate = s -> s.contains(role);
}
final Consumer<String> mutation;
if (Booleans.parseBoolean(matchAttrValue, true)) {
mutation = resolvedNodesIds::add;
} else {
mutation = resolvedNodesIds::remove;
}
for (final DiscoveryNode node : this) {
if (predicate.test(node.getRoles())) {
mutation.accept(node.getId());
}
}
} else if (DiscoveryNode.COORDINATING_ONLY.equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.addAll(getCoordinatingOnlyNodes().keySet());
} else {
resolvedNodesIds.removeAll(getCoordinatingOnlyNodes().keySet());
}
} else {
for (DiscoveryNode node : this) {
for (DiscoveryNodeRole role : Sets.difference(node.getRoles(), DiscoveryNodeRole.roles())) {
if (role.roleName().equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.add(node.getId());
} else {
resolvedNodesIds.remove(node.getId());
}
}
}
}
for (DiscoveryNode node : this) {
for (Map.Entry<String, String> entry : node.getAttributes().entrySet()) {
String attrName = entry.getKey();
String attrValue = entry.getValue();
if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) {
resolvedNodesIds.add(node.getId());
}
}
}
}
}
}
}
return resolvedNodesIds.toArray(Strings.EMPTY_ARRAY);
}
}
/**
* Returns the changes comparing this nodes to the provided nodes.
*/
public Delta delta(DiscoveryNodes other) {
if (this == other) {
return new Delta(this.masterNode, this.masterNode, localNodeId, List.of(), List.of());
}
final List<DiscoveryNode> removed = new ArrayList<>();
final List<DiscoveryNode> added = new ArrayList<>();
for (DiscoveryNode node : other) {
if (this.nodeExists(node) == false) {
removed.add(node);
}
}
for (DiscoveryNode node : this) {
if (other.nodeExists(node) == false) {
added.add(node);
}
}
return new Delta(
other.getMasterNode(),
getMasterNode(),
localNodeId,
Collections.unmodifiableList(removed),
Collections.unmodifiableList(added)
);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("nodes (node-left generation: ").append(nodeLeftGeneration).append("):\n");
for (DiscoveryNode node : this) {
sb.append(" ").append(node);
if (node == getLocalNode()) {
sb.append(", local");
}
if (node == getMasterNode()) {
sb.append(", master");
}
sb.append("\n");
}
return sb.toString();
}
public static | DiscoveryNodes |
java | grpc__grpc-java | alts/src/test/java/io/grpc/alts/internal/AltsHandshakerStubTest.java | {
"start": 1155,
"end": 3945
} | enum ____ {
OK,
ERROR,
COMPLETE
}
private AltsHandshakerStub stub;
private MockWriter writer;
@Before
public void setUp() {
writer = new MockWriter();
stub = new AltsHandshakerStub(writer);
writer.setReader(stub.getReaderForTest());
}
/** Send a message as in_bytes and expect same message as out_frames echo back. */
private void sendSuccessfulMessage() throws Exception {
String message = "hello world";
HandshakerReq.Builder req =
HandshakerReq.newBuilder()
.setNext(
NextHandshakeMessageReq.newBuilder()
.setInBytes(ByteString.copyFromUtf8(message))
.build());
HandshakerResp resp = stub.send(req.build());
assertEquals(resp.getOutFrames().toStringUtf8(), message);
}
/** Send a message and expect an IOException on error. */
private void sendAndExpectError() throws InterruptedException {
try {
stub.send(HandshakerReq.newBuilder().build());
fail("Exception expected");
} catch (IOException ex) {
assertThat(ex).hasMessageThat().contains("Received a terminating error");
assertThat(ex.getCause()).hasMessageThat().contains("Root cause message");
}
}
/** Send a message and expect an IOException on closing. */
private void sendAndExpectComplete() throws InterruptedException {
try {
stub.send(HandshakerReq.newBuilder().build());
fail("Exception expected");
} catch (IOException ex) {
assertThat(ex).hasMessageThat().contains("Response stream closed");
}
}
/** Send a message and expect an IOException on unexpected message. */
private void sendAndExpectUnexpectedMessage() throws InterruptedException {
try {
stub.send(HandshakerReq.newBuilder().build());
fail("Exception expected");
} catch (IOException ex) {
assertThat(ex).hasMessageThat().contains("Received an unexpected response");
}
}
@Test
public void sendSuccessfulMessageTest() throws Exception {
writer.setServiceStatus(Status.OK);
sendSuccessfulMessage();
stub.close();
}
@Test
public void getServiceErrorTest() throws InterruptedException {
writer.setServiceStatus(Status.ERROR);
sendAndExpectError();
stub.close();
}
@Test
public void getServiceCompleteTest() throws Exception {
writer.setServiceStatus(Status.COMPLETE);
sendAndExpectComplete();
stub.close();
}
@Test
public void getUnexpectedMessageTest() throws Exception {
writer.setServiceStatus(Status.OK);
writer.sendUnexpectedResponse();
sendAndExpectUnexpectedMessage();
stub.close();
}
@Test
public void closeEarlyTest() throws InterruptedException {
stub.close();
sendAndExpectComplete();
}
private static | Status |
java | netty__netty | example/src/main/java/io/netty/example/worldclock/WorldClockProtocol.java | {
"start": 30653,
"end": 40930
} | class ____ extends
com.google.protobuf.GeneratedMessage
implements LocationsOrBuilder {
// Use Locations.newBuilder() to construct.
private Locations(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Locations(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Locations defaultInstance;
public static Locations getDefaultInstance() {
return defaultInstance;
}
@Override
public Locations getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Locations(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
location_ = new java.util.ArrayList<io.netty.example.worldclock.WorldClockProtocol.Location>();
mutable_bitField0_ |= 0x00000001;
}
location_.add(input.readMessage(io.netty.example.worldclock.WorldClockProtocol.Location.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
location_ = java.util.Collections.unmodifiableList(location_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_descriptor;
}
@Override
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return io.netty.example.worldclock.WorldClockProtocol.internal_static_io_netty_example_worldclock_Locations_fieldAccessorTable
.ensureFieldAccessorsInitialized(
io.netty.example.worldclock.WorldClockProtocol.Locations.class, io.netty.example.worldclock.WorldClockProtocol.Locations.Builder.class);
}
public static com.google.protobuf.Parser<Locations> PARSER =
new com.google.protobuf.AbstractParser<Locations>() {
@Override
public Locations parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Locations(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Locations> getParserForType() {
return PARSER;
}
// repeated .io.netty.example.worldclock.Location location = 1;
public static final int LOCATION_FIELD_NUMBER = 1;
private java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> location_;
/**
* <code>repeated .io.netty.example.worldclock.Location location = 1;</code>
*/
@Override
public java.util.List<io.netty.example.worldclock.WorldClockProtocol.Location> getLocationList() {
return location_;
}
/**
* <code>repeated .io.netty.example.worldclock.Location location = 1;</code>
*/
@Override
public java.util.List<? extends io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder>
getLocationOrBuilderList() {
return location_;
}
/**
* <code>repeated .io.netty.example.worldclock.Location location = 1;</code>
*/
@Override
public int getLocationCount() {
return location_.size();
}
/**
* <code>repeated .io.netty.example.worldclock.Location location = 1;</code>
*/
@Override
public io.netty.example.worldclock.WorldClockProtocol.Location getLocation(int index) {
return location_.get(index);
}
/**
* <code>repeated .io.netty.example.worldclock.Location location = 1;</code>
*/
@Override
public io.netty.example.worldclock.WorldClockProtocol.LocationOrBuilder getLocationOrBuilder(
int index) {
return location_.get(index);
}
private void initFields() {
location_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
@Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getLocationCount(); i++) {
if (!getLocation(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < location_.size(); i++) {
output.writeMessage(1, location_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
@Override
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < location_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, location_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static io.netty.example.worldclock.WorldClockProtocol.Locations parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
@Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(io.netty.example.worldclock.WorldClockProtocol.Locations prototype) {
return newBuilder().mergeFrom(prototype);
}
@Override
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code io.netty.example.worldclock.Locations}
*/
public static final | Locations |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/util/ReflectionUtils.java | {
"start": 29443,
"end": 30449
} | class ____ which the method should be referenced;
* never {@code null}
* @param methodName the name of the method; never {@code null} or blank
* @param parameterTypeNames the parameter type names of the method; may be
* empty but not {@code null}
* @return fully qualified method name; never {@code null}
* @since 1.11
*/
@API(status = INTERNAL, since = "1.11")
public static String getFullyQualifiedMethodName(String className, String methodName, String parameterTypeNames) {
Preconditions.notBlank(className, "Class name must not be null or blank");
Preconditions.notBlank(methodName, "Method name must not be null or blank");
Preconditions.notNull(parameterTypeNames, "Parameter type names must not be null");
return "%s#%s(%s)".formatted(className, methodName, parameterTypeNames);
}
/**
* Parse the supplied <em>fully qualified method name</em> into a 3-element
* {@code String[]} with the following content.
*
* <ul>
* <li>index {@code 0}: the fully qualified | from |
java | apache__camel | components/camel-elasticsearch-rest-client/src/generated/java/org/apache/camel/component/elasticsearch/rest/client/ElasticsearchRestClientComponentConfigurer.java | {
"start": 752,
"end": 5698
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
ElasticsearchRestClientComponent target = (ElasticsearchRestClientComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
case "certificatepath":
case "certificatePath": target.setCertificatePath(property(camelContext, java.lang.String.class, value)); return true;
case "connectiontimeout":
case "connectionTimeout": target.setConnectionTimeout(property(camelContext, int.class, value)); return true;
case "enablesniffer":
case "enableSniffer": target.setEnableSniffer(property(camelContext, boolean.class, value)); return true;
case "hostaddresseslist":
case "hostAddressesList": target.setHostAddressesList(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "password": target.setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "restclient":
case "restClient": target.setRestClient(property(camelContext, org.elasticsearch.client.RestClient.class, value)); return true;
case "sniffafterfailuredelay":
case "sniffAfterFailureDelay": target.setSniffAfterFailureDelay(property(camelContext, int.class, value)); return true;
case "snifferinterval":
case "snifferInterval": target.setSnifferInterval(property(camelContext, int.class, value)); return true;
case "sockettimeout":
case "socketTimeout": target.setSocketTimeout(property(camelContext, int.class, value)); return true;
case "user": target.setUser(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public String[] getAutowiredNames() {
return new String[]{"restClient"};
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return boolean.class;
case "certificatepath":
case "certificatePath": return java.lang.String.class;
case "connectiontimeout":
case "connectionTimeout": return int.class;
case "enablesniffer":
case "enableSniffer": return boolean.class;
case "hostaddresseslist":
case "hostAddressesList": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "password": return java.lang.String.class;
case "restclient":
case "restClient": return org.elasticsearch.client.RestClient.class;
case "sniffafterfailuredelay":
case "sniffAfterFailureDelay": return int.class;
case "snifferinterval":
case "snifferInterval": return int.class;
case "sockettimeout":
case "socketTimeout": return int.class;
case "user": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
ElasticsearchRestClientComponent target = (ElasticsearchRestClientComponent) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "autowiredenabled":
case "autowiredEnabled": return target.isAutowiredEnabled();
case "certificatepath":
case "certificatePath": return target.getCertificatePath();
case "connectiontimeout":
case "connectionTimeout": return target.getConnectionTimeout();
case "enablesniffer":
case "enableSniffer": return target.isEnableSniffer();
case "hostaddresseslist":
case "hostAddressesList": return target.getHostAddressesList();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "password": return target.getPassword();
case "restclient":
case "restClient": return target.getRestClient();
case "sniffafterfailuredelay":
case "sniffAfterFailureDelay": return target.getSniffAfterFailureDelay();
case "snifferinterval":
case "snifferInterval": return target.getSnifferInterval();
case "sockettimeout":
case "socketTimeout": return target.getSocketTimeout();
case "user": return target.getUser();
default: return null;
}
}
}
| ElasticsearchRestClientComponentConfigurer |
java | jhy__jsoup | src/test/java/org/jsoup/parser/StreamParserTest.java | {
"start": 2443,
"end": 15103
} | interface
____ html = "<title>Test</title></head><div id=1>D1</div><div id=2>D2<p id=3><span>P One</p><p id=4>P Two</p></div><div id=5>D3<p id=6>P three</p>";
StreamParser parser = new StreamParser(Parser.htmlParser()).parse(html, "");
StringBuilder seen = new StringBuilder();
Iterator<Element> it = parser.iterator();
while (it.hasNext()) {
trackSeen(it.next(), seen);
}
assertEquals("title[Test];head+;div#1[D1]+;span[P One];p#3+;p#4[P Two];div#2[D2]+;p#6[P three];div#5[D3];body;html;#root;", seen.toString());
// checks expected order, and the + indicates that element had a next sibling at time of emission
}
@Test void canReuse() {
StreamParser parser = new StreamParser(Parser.htmlParser());
String html1 = "<p>One<p>Two";
parser.parse(html1, "");
StringBuilder seen = new StringBuilder();
parser.stream().forEach(el -> trackSeen(el, seen));
assertEquals("head+;p[One]+;p[Two];body;html;#root;", seen.toString());
String html2 = "<div>Three<div>Four</div></div>";
StringBuilder seen2 = new StringBuilder();
parser.parse(html2, "");
parser.stream().forEach(el -> trackSeen(el, seen2));
assertEquals("head+;div[Four];div[Three];body;html;#root;", seen2.toString());
// re-run without a new parse should be empty
StringBuilder seen3 = new StringBuilder();
parser.stream().forEach(el -> trackSeen(el, seen3));
assertEquals("", seen3.toString());
}
@Test void canStopAndCompleteAndReuse() throws IOException {
StreamParser parser = new StreamParser(Parser.htmlParser());
String html1 = "<p>One<p>Two";
parser.parse(html1, "");
Element p = parser.expectFirst("p");
assertEquals("One", p.text());
parser.stop();
Iterator<Element> it = parser.iterator();
assertFalse(it.hasNext());
assertThrows(NoSuchElementException.class, it::next);
Element p2 = parser.selectNext("p");
assertNull(p2);
Document completed = parser.complete();
Elements ps = completed.select("p");
assertEquals(2, ps.size());
assertEquals("One", ps.get(0).text());
assertEquals("Two", ps.get(1).text());
// can reuse
parser.parse("<div>DIV", "");
Element div = parser.expectFirst("div");
assertEquals("DIV", div.text());
}
static void trackSeen(Element el, StringBuilder actual) {
actual.append(el.tagName());
if (el.hasAttr("id"))
actual.append("#").append(el.id());
if (!el.ownText().isEmpty())
actual.append("[").append(el.ownText()).append("]");
if (el.nextElementSibling() != null)
actual.append("+");
actual.append(";");
}
@Test void select() throws IOException {
String html = "<title>One</title><p id=1>P One</p><p id=2>P Two</p>";
StreamParser parser = new StreamParser(Parser.htmlParser()).parse(html, "");
Element title = parser.expectFirst("title");
assertEquals("One", title.text());
Document partialDoc = title.ownerDocument();
assertNotNull(partialDoc);
// at this point, we should have one P with no text - as title was emitted on P head
Elements ps = partialDoc.select("p");
assertEquals(1, ps.size());
assertEquals("", ps.get(0).text());
assertSame(partialDoc, parser.document());
Element title2 = parser.selectFirst("title");
assertSame(title2, title);
Element p1 = parser.expectNext("p");
assertEquals("P One", p1.text());
Element p2 = parser.expectNext("p");
assertEquals("P Two", p2.text());
Element pNone = parser.selectNext("p");
assertNull(pNone);
}
@Test void canRemoveFromDom() {
String html = "<div>One</div><div>DESTROY</div><div>Two</div>";
StreamParser parser = new StreamParser(Parser.htmlParser()).parse(html, "");
parser.parse(html, "");
parser.stream().forEach(
el -> {
if (el.ownText().equals("DESTROY"))
el.remove();
});
Document doc = parser.document();
Elements divs = doc.select("div");
assertEquals(2, divs.size());
assertEquals("One Two", divs.text());
}
@Test void canRemoveWithIterator() {
String html = "<div>One</div><div>DESTROY</div><div>Two</div>";
StreamParser parser = new StreamParser(Parser.htmlParser()).parse(html, "");
parser.parse(html, "");
Iterator<Element> it = parser.iterator();
while (it.hasNext()) {
Element el = it.next();
if (el.ownText().equals("DESTROY"))
it.remove(); // we know el.remove() works, from above test
}
Document doc = parser.document();
Elements divs = doc.select("div");
assertEquals(2, divs.size());
assertEquals("One Two", divs.text());
}
@Test void canSelectWithHas() throws IOException {
StreamParser parser = basic();
Element el = parser.expectNext("div:has(p)");
assertEquals("Two", el.text());
}
@Test void canSelectWithSibling() throws IOException {
StreamParser parser = basic();
Element el = parser.expectNext("div:first-of-type");
assertEquals("One", el.text());
Element el2 = parser.selectNext("div:first-of-type");
assertNull(el2);
}
@Test void canLoopOnSelectNext() throws IOException {
StreamParser streamer = new StreamParser(Parser.htmlParser()).parse("<div><p>One<p>Two<p>Thr</div>", "");
int count = 0;
Element e;
while ((e = streamer.selectNext("p")) != null) {
assertEquals(3, e.text().length()); // has a body
e.remove();
count++;
}
assertEquals(3, count);
assertEquals(0, streamer.document().select("p").size()); // removed all during iter
assertTrue(isClosed(streamer)); // read to the end
}
@Test void worksWithXmlParser() throws IOException {
StreamParser streamer = new StreamParser(Parser.xmlParser()).parse("<div><p>One</p><p>Two</p><p>Thr</p></div>", "");
int count = 0;
Element e;
while ((e = streamer.selectNext("p")) != null) {
assertEquals(3, e.text().length()); // has a body
e.remove();
count++;
}
assertEquals(3, count);
assertEquals(0, streamer.document().select("p").size()); // removed all during iter
assertTrue(isClosed(streamer)); // read to the end
}
@Test void closedOnStreamDrained() {
StreamParser streamer = basic();
assertFalse(isClosed(streamer));
long count = streamer.stream().count();
assertEquals(7, count);
assertTrue(isClosed(streamer));
}
@Test void closedOnIteratorDrained() {
StreamParser streamer = basic();
int count = 0;
Iterator<Element> it = streamer.iterator();
while (it.hasNext()) {
it.next();
count++;
}
assertEquals(7, count);
assertTrue(isClosed(streamer));
}
@Test void closedOnComplete() throws IOException {
StreamParser streamer = basic();
Document doc = streamer.complete();
assertTrue(isClosed(streamer));
}
@Test void closedOnTryWithResources() {
StreamParser copy;
try(StreamParser streamer = basic()) {
copy = streamer;
assertFalse(isClosed(copy));
}
assertTrue(isClosed(copy));
}
static StreamParser basic() {
String html = "<div>One</div><div><p>Two</div>";
StreamParser parser = new StreamParser(Parser.htmlParser()).parse(html, "");
parser.parse(html, "");
return parser;
}
static boolean isClosed(StreamParser streamer) {
// a bit of a back door in!
return getReader(streamer) == null;
}
private static CharacterReader getReader(StreamParser streamer) {
return streamer.document().parser().getTreeBuilder().reader;
}
@Test void doesNotReadPastParse() throws IOException {
StreamParser streamer = basic();
Element div = streamer.expectFirst("div");
// we should have read the sibling div, but not yet its children p
Element sib = div.nextElementSibling();
assertNotNull(sib);
assertEquals("div", sib.tagName());
assertEquals(0, sib.childNodeSize());
// the Reader should be at "<p>" because we haven't consumed it
assertTrue(getReader(streamer).matches("<p>Two"));
}
@Test void canParseFileReader() throws IOException {
File file = ParseTest.getFile("/htmltests/large.html");
// can't use FileReader from Java 11 here
InputStreamReader input = new InputStreamReader(Files.newInputStream(file.toPath()), StandardCharsets.UTF_8);
BufferedReader reader = new BufferedReader(input);
StreamParser streamer = new StreamParser(Parser.htmlParser()).parse(reader, file.getAbsolutePath());
Element last = null, e;
while ((e = streamer.selectNext("p")) != null) {
last = e;
}
assertTrue(last.text().startsWith("VESTIBULUM"));
// the reader should be closed as streamer is closed on completion of read
assertTrue(isClosed(streamer));
assertThrows(IOException.class, reader::ready); // ready() checks isOpen and throws
}
@Test void canParseFile() throws IOException {
File file = ParseTest.getFile("/htmltests/large.html");
StreamParser streamer = DataUtil.streamParser(file.toPath(), StandardCharsets.UTF_8, "", Parser.htmlParser());
Element last = null, e;
while ((e = streamer.selectNext("p")) != null) {
last = e;
}
assertTrue(last.text().startsWith("VESTIBULUM"));
// the reader should be closed as streamer is closed on completion of read
assertTrue(isClosed(streamer));
}
@Test void canCleanlyConsumePortionOfUrl() throws IOException {
// test that we can get just the head section of large.html, and only read the minimum required from the URL
String url = FileServlet.urlTo("/htmltests/large.html"); // 280 K
AtomicReference<Float> seenPercent = new AtomicReference<>(0.0f);
StreamParser parserRef;
Connection con = Jsoup.connect(url)
.onResponseProgress((processed, total, percent, response) -> {
//System.out.println("Processed: " + processed + " Total: " + total + " Percent: " + percent);
seenPercent.set(percent);
});
Connection.Response response = con.execute();
try (StreamParser parser = response.streamParser()) {
parserRef = parser;
// get the head section
Element head = parser.selectFirst("head");
Element title = head.expectFirst("title");
assertEquals("Large HTML", title.text());
}
// now that we've left the try, the stream parser and the response bodystream should be closed
assertTrue(isClosed(parserRef));
// test that we didn't read all of the stream
assertTrue(seenPercent.get() > 0.0f);
assertTrue(seenPercent.get() < 100.0f);
// not sure of a good way to assert the bufferedInputReader buf (as held by ConstrainableInputStream in Response.BodyStream) is null. But it is via StreamParser.close.
}
// Fragments
@Test
void canStreamFragment() {
String html = "<tr id=1><td>One</td><tr id=2><td>Two</td></tr><tr id=3><td>Three</td></tr>";
Element context = new Element("table");
try (StreamParser parser = new StreamParser(Parser.htmlParser()).parseFragment(html, context, "")) {
StringBuilder seen = new StringBuilder();
parser.stream().forEachOrdered(el -> trackSeen(el, seen));
assertEquals("td[One];tr#1+;td[Two];tr#2+;td[Three];tr#3;tbody;table;#root;", seen.toString());
// checks expected order, and the + indicates that element had a next sibling at time of emission
// note that we don't get a full doc, just the fragment (and the context at the end of the stack)
assertTrue(isClosed(parser)); // as read to completion
}
}
@Test void canIterateFragment() {
// same as stream, just a different | String |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/SDKStreamDrainer.java | {
"start": 1591,
"end": 8567
} | class ____<TStream extends InputStream & Abortable>
implements CallableRaisingIOE<Boolean> {
private static final Logger LOG = LoggerFactory.getLogger(
SDKStreamDrainer.class);
/**
* URI for log messages.
*/
private final String uri;
/**
* Stream from the getObject response for draining and closing.
*/
private final TStream sdkStream;
/**
* Should the request be aborted?
*/
private final boolean shouldAbort;
/**
* How many bytes remaining?
* This is decremented as the stream is
* drained;
* If the stream finished before the expected
* remaining value was read, this will show how many
* bytes were still expected.
*/
private int remaining;
/**
* Statistics to update with the duration.
*/
private final S3AInputStreamStatistics streamStatistics;
/**
* Reason? for log messages.
*/
private final String reason;
/**
* Has the operation executed yet?
*/
private final AtomicBoolean executed = new AtomicBoolean(false);
/**
* Any exception caught during execution.
*/
private Exception thrown;
/**
* Was the stream aborted?
*/
private boolean aborted;
/**
* how many bytes were drained?
*/
private int drained = 0;
/**
* Prepare to drain the stream.
* @param uri URI for messages
* @param sdkStream stream to close.
* @param shouldAbort force an abort; used if explicitly requested.
* @param streamStatistics stats to update
* @param reason reason for stream being closed; used in messages
* @param remaining remaining bytes
*/
public SDKStreamDrainer(final String uri,
final TStream sdkStream,
final boolean shouldAbort,
final int remaining,
final S3AInputStreamStatistics streamStatistics,
final String reason) {
this.uri = uri;
this.sdkStream = requireNonNull(sdkStream);
this.shouldAbort = shouldAbort;
this.remaining = remaining;
this.streamStatistics = requireNonNull(streamStatistics);
this.reason = reason;
}
/**
* drain the stream. This method is intended to be
* used directly or asynchronously, and measures the
* duration of the operation in the stream statistics.
* @return was the stream aborted?
*/
@Override
public Boolean apply() {
try {
Boolean outcome = invokeTrackingDuration(
streamStatistics.initiateInnerStreamClose(shouldAbort),
this::drainOrAbortHttpStream);
aborted = outcome;
return outcome;
} catch (Exception e) {
thrown = e;
return aborted;
}
}
/**
* Apply, raising any exception.
* For testing.
* @return the outcome.
* @throws Exception anything raised.
*/
@VisibleForTesting
boolean applyRaisingException() throws Exception {
Boolean outcome = apply();
if (thrown != null) {
throw thrown;
}
return outcome;
}
/**
* Drain or abort the inner stream.
* Exceptions are saved then swallowed.
* If a close() is attempted and fails, the operation escalates to
* an abort.
* @return true if the stream was aborted.
*/
private boolean drainOrAbortHttpStream() {
if (executed.getAndSet(true)) {
throw new IllegalStateException(
"duplicate invocation of drain operation");
}
boolean executeAbort = shouldAbort;
if (remaining > 0 || executeAbort) {
// only log if there is a drain or an abort
LOG.debug("drain or abort reason {} remaining={} abort={}",
reason, remaining, executeAbort);
}
if (!executeAbort) {
try {
// clean close. This will read to the end of the stream,
// so, while cleaner, can be pathological on a multi-GB object
if (remaining > 0) {
// explicitly drain the stream
LOG.debug("draining {} bytes", remaining);
drained = 0;
int size = Math.min(remaining, DRAIN_BUFFER_SIZE);
byte[] buffer = new byte[size];
// read the data; bail out early if
// the connection breaks.
// this may be a bit overaggressive on buffer underflow.
while (remaining > 0) {
final int count = sdkStream.read(buffer);
LOG.debug("read {} bytes", count);
if (count <= 0) {
// no more data is left
break;
}
drained += count;
remaining -= count;
}
LOG.debug("Drained stream of {} bytes", drained);
}
if (remaining != 0) {
// fewer bytes than expected came back; not treating as a
// reason to escalate to an abort().
// just log.
LOG.debug("drained fewer bytes than expected; {} remaining",
remaining);
}
// now close it.
// if there is still data in the stream, the SDK
// will warn and escalate to an abort itself.
LOG.debug("Closing stream");
sdkStream.close();
// this MUST come after the close, so that if the IO operations fail
// and an abort is triggered, the initial attempt's statistics
// aren't collected.
streamStatistics.streamClose(false, drained);
return false;
} catch (Exception e) {
// exception escalates to an abort
LOG.debug("When closing {} stream for {}, will abort the stream",
uri, reason, e);
thrown = e;
}
}
// Abort, rather than just close, the underlying stream. Otherwise, the
// remaining object payload is read from S3 while closing the stream.
LOG.debug("Aborting stream {}", uri);
try {
sdkStream.abort();
} catch (Exception e) {
LOG.warn("When aborting {} stream after failing to close it for {}",
uri, reason, e);
thrown = e;
}
streamStatistics.streamClose(true, remaining);
LOG.debug("Stream {} aborted: {}; remaining={}",
uri, reason, remaining);
return true;
}
public String getUri() {
return uri;
}
public TStream getSdkStream() {
return sdkStream;
}
public boolean shouldAbort() {
return shouldAbort;
}
public int getRemaining() {
return remaining;
}
public S3AInputStreamStatistics getStreamStatistics() {
return streamStatistics;
}
public String getReason() {
return reason;
}
public boolean executed() {
return executed.get();
}
public Exception getThrown() {
return thrown;
}
public int getDrained() {
return drained;
}
public boolean aborted() {
return aborted;
}
@Override
public String toString() {
return "SDKStreamDrainer{" +
"uri='" + uri + '\'' +
", reason='" + reason + '\'' +
", shouldAbort=" + shouldAbort +
", remaining=" + remaining +
", executed=" + executed.get() +
", aborted=" + aborted +
", inner=" + sdkStream +
", thrown=" + thrown +
'}';
}
}
| SDKStreamDrainer |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/strategy/internal/ValidityAuditStrategy.java | {
"start": 26444,
"end": 26987
} | class ____ extends Update {
private final List<QueryParameterBinding> bindings = new ArrayList<>( 0 );
public UpdateContext(SessionFactoryImplementor sessionFactory) {
super ( sessionFactory );
}
public List<QueryParameterBinding> getBindings() {
return bindings;
}
public void bind(Object value, Type type) {
bindings.add( new QueryParameterBindingType( value, type ) );
}
public void bind(Object value, ModelPart part) {
bindings.add( new QueryParameterBindingPart( value, part ) );
}
}
private | UpdateContext |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/consumer/StickyAssignor.java | {
"start": 8288,
"end": 13028
} | class ____ extends AbstractStickyAssignor {
public static final String STICKY_ASSIGNOR_NAME = "sticky";
// these schemas are used for preserving consumer's previously assigned partitions
// list and sending it as user data to the leader during a rebalance
static final String TOPIC_PARTITIONS_KEY_NAME = "previous_assignment";
static final String TOPIC_KEY_NAME = "topic";
static final String PARTITIONS_KEY_NAME = "partitions";
private static final String GENERATION_KEY_NAME = "generation";
static final Schema TOPIC_ASSIGNMENT = new Schema(
new Field(TOPIC_KEY_NAME, Type.STRING),
new Field(PARTITIONS_KEY_NAME, new ArrayOf(Type.INT32)));
static final Schema STICKY_ASSIGNOR_USER_DATA_V0 = new Schema(
new Field(TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(TOPIC_ASSIGNMENT)));
private static final Schema STICKY_ASSIGNOR_USER_DATA_V1 = new Schema(
new Field(TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(TOPIC_ASSIGNMENT)),
new Field(GENERATION_KEY_NAME, Type.INT32));
private List<TopicPartition> memberAssignment = null;
private int generation = DEFAULT_GENERATION; // consumer group generation
@Override
public String name() {
return STICKY_ASSIGNOR_NAME;
}
@Override
public void onAssignment(Assignment assignment, ConsumerGroupMetadata metadata) {
memberAssignment = assignment.partitions();
this.generation = metadata.generationId();
}
@Override
public ByteBuffer subscriptionUserData(Set<String> topics) {
if (memberAssignment == null)
return null;
return serializeTopicPartitionAssignment(new MemberData(memberAssignment, Optional.of(generation)));
}
@Override
protected MemberData memberData(Subscription subscription) {
// Always deserialize ownedPartitions and generation id from user data
// since StickyAssignor is an eager rebalance protocol that will revoke all existing partitions before joining group
ByteBuffer userData = subscription.userData();
if (userData == null || !userData.hasRemaining()) {
return new MemberData(Collections.emptyList(), Optional.empty(), subscription.rackId());
}
return deserializeTopicPartitionAssignment(userData);
}
// visible for testing
static ByteBuffer serializeTopicPartitionAssignment(MemberData memberData) {
Struct struct = new Struct(STICKY_ASSIGNOR_USER_DATA_V1);
List<Struct> topicAssignments = new ArrayList<>();
for (Map.Entry<String, List<Integer>> topicEntry : CollectionUtils.groupPartitionsByTopic(memberData.partitions).entrySet()) {
Struct topicAssignment = new Struct(TOPIC_ASSIGNMENT);
topicAssignment.set(TOPIC_KEY_NAME, topicEntry.getKey());
topicAssignment.set(PARTITIONS_KEY_NAME, topicEntry.getValue().toArray());
topicAssignments.add(topicAssignment);
}
struct.set(TOPIC_PARTITIONS_KEY_NAME, topicAssignments.toArray());
memberData.generation.ifPresent(integer -> struct.set(GENERATION_KEY_NAME, integer));
ByteBuffer buffer = ByteBuffer.allocate(STICKY_ASSIGNOR_USER_DATA_V1.sizeOf(struct));
STICKY_ASSIGNOR_USER_DATA_V1.write(buffer, struct);
buffer.flip();
return buffer;
}
private static MemberData deserializeTopicPartitionAssignment(ByteBuffer buffer) {
Struct struct;
ByteBuffer copy = buffer.duplicate();
try {
struct = STICKY_ASSIGNOR_USER_DATA_V1.read(buffer);
} catch (Exception e1) {
try {
// fall back to older schema
struct = STICKY_ASSIGNOR_USER_DATA_V0.read(copy);
} catch (Exception e2) {
// ignore the consumer's previous assignment if it cannot be parsed
return new MemberData(Collections.emptyList(), Optional.of(DEFAULT_GENERATION));
}
}
List<TopicPartition> partitions = new ArrayList<>();
for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) {
Struct assignment = (Struct) structObj;
String topic = assignment.getString(TOPIC_KEY_NAME);
for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) {
Integer partition = (Integer) partitionObj;
partitions.add(new TopicPartition(topic, partition));
}
}
// make sure this is backward compatible
Optional<Integer> generation = struct.hasField(GENERATION_KEY_NAME) ? Optional.of(struct.getInt(GENERATION_KEY_NAME)) : Optional.empty();
return new MemberData(partitions, generation);
}
} | StickyAssignor |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/runtime/IPv6HostnamesITCase.java | {
"start": 6019,
"end": 8226
} | interface
____<InetAddress> ee = netInterface.getInetAddresses();
while (ee.hasMoreElements()) {
InetAddress addr = ee.nextElement();
if (addr instanceof Inet6Address
&& (!addr.isLoopbackAddress())
&& (!addr.isAnyLocalAddress())) {
// see if it is possible to bind to the address
InetSocketAddress socketAddress = new InetSocketAddress(addr, 0);
try {
log.info("Considering address " + addr);
// test whether we can bind a socket to that address
log.info("Testing whether sockets can bind to " + addr);
ServerSocket sock = new ServerSocket();
sock.bind(socketAddress);
sock.close();
// test whether Pekko's netty can bind to the address
log.info("Testing whether Pekko can use " + addr);
final RpcService rpcService =
RpcSystem.load()
// this port is only used for advertising (==no port
// conflicts) since we explicitly provide a bind port
.remoteServiceBuilder(new Configuration(), null, "8081")
.withBindAddress(addr.getHostAddress())
.withBindPort(0)
.createAndStart();
rpcService.closeAsync().get();
log.info("Using address " + addr);
return (Inet6Address) addr;
} catch (IOException ignored) {
// fall through the loop
}
}
}
}
return null;
} catch (Exception e) {
return null;
}
}
}
| Enumeration |
java | quarkusio__quarkus | extensions/smallrye-graphql/deployment/src/test/java/io/quarkus/smallrye/graphql/deployment/ShowRuntimeExceptionMessageTest.java | {
"start": 566,
"end": 2357
} | class ____ extends AbstractGraphQLTest {
private static final String ILLEGAL_ARGUMENT_EXCEPTION_MESSAGE = "Something went wrong";
private static final String ILLEGAL_STATE_EXCEPTION_MESSAGE = "Something else went wrong";
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(TestApi.class)
.addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml")
.addAsResource(
new StringAsset(
"quarkus.smallrye-graphql.show-runtime-exception-message=" +
"java.lang.IllegalArgumentException," +
"java.lang.IllegalStateException"),
"application.properties"));
@Test
void testExcludeNullFieldsInResponse() {
given()
.when()
.accept(MEDIATYPE_JSON)
.contentType(MEDIATYPE_JSON)
.body(getPayload("{ something }"))
.post("/graphql")
.then()
.assertThat()
.statusCode(OK)
.and()
.body(containsString(ILLEGAL_ARGUMENT_EXCEPTION_MESSAGE));
given()
.when()
.accept(MEDIATYPE_JSON)
.contentType(MEDIATYPE_JSON)
.body(getPayload("{ somethingElse }"))
.post("/graphql")
.then()
.assertThat()
.statusCode(OK)
.and()
.body(containsString(ILLEGAL_STATE_EXCEPTION_MESSAGE));
}
@GraphQLApi
public static | ShowRuntimeExceptionMessageTest |
java | quarkusio__quarkus | extensions/reactive-routes/runtime/src/main/java/io/quarkus/vertx/web/runtime/MultiNdjsonSupport.java | {
"start": 492,
"end": 3326
} | class ____ {
private MultiNdjsonSupport() {
// Avoid direct instantiation.
}
private static void initialize(HttpServerResponse response, RoutingContext rc) {
if (response.bytesWritten() == 0) {
MultiMap headers = response.headers();
if (headers.get(HttpHeaders.CONTENT_TYPE) == null) {
if (rc.getAcceptableContentType() == null) {
headers.set(HttpHeaders.CONTENT_TYPE, "application/x-ndjson");
} else {
headers.set(HttpHeaders.CONTENT_TYPE, rc.getAcceptableContentType());
}
}
response.setChunked(true);
}
}
public static void subscribeString(Multi<String> multi, RoutingContext rc) {
write(multi.map(s -> Buffer.buffer("\"" + s + "\"\n")), rc);
}
public static void subscribeObject(Multi<Object> multi, RoutingContext rc) {
write(multi.map(o -> Buffer.buffer(Json.encode(o) + "\n")), rc);
}
private static void onWriteDone(Subscription subscription, AsyncResult<Void> ar, RoutingContext rc) {
if (ar.failed()) {
rc.fail(ar.cause());
} else {
subscription.request(1);
}
}
public static void write(Multi<Buffer> multi, RoutingContext rc) {
HttpServerResponse response = rc.response();
multi.subscribe().withSubscriber(new Subscriber<Buffer>() {
Subscription upstream;
@Override
public void onSubscribe(Subscription subscription) {
this.upstream = subscription;
this.upstream.request(1);
}
@Override
public void onNext(Buffer item) {
initialize(response, rc);
response.write(item, ar -> onWriteDone(upstream, ar, rc));
}
@Override
public void onError(Throwable throwable) {
rc.fail(throwable);
}
@Override
public void onComplete() {
endOfStream(response, rc);
}
});
}
private static void endOfStream(HttpServerResponse response, RoutingContext rc) {
if (response.bytesWritten() == 0) { // No item
MultiMap headers = response.headers();
if (headers.get(HttpHeaders.CONTENT_TYPE) == null) {
if (rc.getAcceptableContentType() == null) {
headers.set(HttpHeaders.CONTENT_TYPE, "application/x-ndjson");
} else {
headers.set(HttpHeaders.CONTENT_TYPE, rc.getAcceptableContentType());
}
}
}
response.end();
}
public static boolean isNdjson(Multi<?> multi) {
return multi instanceof NdjsonMulti;
}
}
| MultiNdjsonSupport |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/TypeInferenceUtil.java | {
"start": 12570,
"end": 14928
} | interface ____ {
static SurroundingInfo of(
String name,
FunctionDefinition functionDefinition,
TypeInference typeInference,
int argumentCount,
int innerCallPosition,
boolean isGroupedAggregation) {
return typeFactory -> {
final boolean isValidCount =
validateArgumentCount(
typeInference.getInputTypeStrategy().getArgumentCount(),
argumentCount,
false);
if (!isValidCount) {
return Optional.empty();
}
// for "takes_string(this_function(NULL))" simulate "takes_string(NULL)"
// for retrieving the output type of "this_function(NULL)"
final CallContext callContext =
new UnknownCallContext(
typeFactory,
name,
functionDefinition,
argumentCount,
isGroupedAggregation);
// We might not be able to infer the input types at this moment, if the surrounding
// function does not provide an explicit input type strategy.
final CallContext adaptedContext =
castArguments(typeInference, callContext, null, false);
return typeInference
.getInputTypeStrategy()
.inferInputTypes(adaptedContext, false)
.map(dataTypes -> dataTypes.get(innerCallPosition));
};
}
static SurroundingInfo of(DataType dataType) {
return typeFactory -> Optional.of(dataType);
}
Optional<DataType> inferOutputType(DataTypeFactory typeFactory);
}
/**
* The result of a type inference run. It contains information about how arguments need to be
* adapted in order to comply with the function's signature.
*
* <p>This includes casts that need to be inserted, reordering of arguments (*), or insertion of
* default values (*) where (*) is future work.
*/
@Internal
public static final | SurroundingInfo |
java | apache__flink | flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/sstmerge/Compactor.java | {
"start": 2727,
"end": 3118
} | interface ____ {
void compactFiles(
CompactionOptions var1,
ColumnFamilyHandle var2,
List<String> var3,
int var4,
int var5,
CompactionJobInfo var6)
throws RocksDBException;
CompactionTarget NO_OP = (var1, var2, var3, var4, var5, var6) -> {};
}
}
| CompactionTarget |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java | {
"start": 1673,
"end": 1773
} | class ____ the replication and injection of blocks of a DFS file for simulated storage.
*/
public | tests |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/authentication/configuration/AuthenticationConfiguration.java | {
"start": 3105,
"end": 8406
} | class ____ {
private AtomicBoolean buildingAuthenticationManager = new AtomicBoolean();
private ApplicationContext applicationContext;
private AuthenticationManager authenticationManager;
private boolean authenticationManagerInitialized;
private List<GlobalAuthenticationConfigurerAdapter> globalAuthConfigurers = Collections.emptyList();
private ObjectPostProcessor<Object> objectPostProcessor;
@Bean
public AuthenticationManagerBuilder authenticationManagerBuilder(ObjectPostProcessor<Object> objectPostProcessor,
ApplicationContext context) {
LazyPasswordEncoder defaultPasswordEncoder = new LazyPasswordEncoder(context);
AuthenticationEventPublisher authenticationEventPublisher = getAuthenticationEventPublisher(context);
DefaultPasswordEncoderAuthenticationManagerBuilder result = new DefaultPasswordEncoderAuthenticationManagerBuilder(
objectPostProcessor, defaultPasswordEncoder);
if (authenticationEventPublisher != null) {
result.authenticationEventPublisher(authenticationEventPublisher);
}
return result;
}
@Bean
public static GlobalAuthenticationConfigurerAdapter enableGlobalAuthenticationAutowiredConfigurer(
ApplicationContext context) {
return new EnableGlobalAuthenticationAutowiredConfigurer(context);
}
@Bean
public static InitializeUserDetailsBeanManagerConfigurer initializeUserDetailsBeanManagerConfigurer(
ApplicationContext context) {
return new InitializeUserDetailsBeanManagerConfigurer(context);
}
@Bean
public static InitializeAuthenticationProviderBeanManagerConfigurer initializeAuthenticationProviderBeanManagerConfigurer(
ApplicationContext context) {
return new InitializeAuthenticationProviderBeanManagerConfigurer(context);
}
public AuthenticationManager getAuthenticationManager() {
if (this.authenticationManagerInitialized) {
return this.authenticationManager;
}
AuthenticationManagerBuilder authBuilder = this.applicationContext.getBean(AuthenticationManagerBuilder.class);
if (this.buildingAuthenticationManager.getAndSet(true)) {
return new AuthenticationManagerDelegator(authBuilder);
}
for (GlobalAuthenticationConfigurerAdapter config : this.globalAuthConfigurers) {
authBuilder.apply(config);
}
this.authenticationManager = authBuilder.build();
if (this.authenticationManager == null) {
this.authenticationManager = getAuthenticationManagerBean();
}
this.authenticationManagerInitialized = true;
return this.authenticationManager;
}
@Autowired(required = false)
public void setGlobalAuthenticationConfigurers(List<GlobalAuthenticationConfigurerAdapter> configurers) {
configurers.sort(AnnotationAwareOrderComparator.INSTANCE);
this.globalAuthConfigurers = configurers;
}
@Autowired
public void setApplicationContext(ApplicationContext applicationContext) {
this.applicationContext = applicationContext;
}
@Autowired
public void setObjectPostProcessor(ObjectPostProcessor<Object> objectPostProcessor) {
this.objectPostProcessor = objectPostProcessor;
}
private AuthenticationEventPublisher getAuthenticationEventPublisher(ApplicationContext context) {
if (context.getBeanNamesForType(AuthenticationEventPublisher.class).length > 0) {
return context.getBean(AuthenticationEventPublisher.class);
}
return this.objectPostProcessor.postProcess(new DefaultAuthenticationEventPublisher());
}
@SuppressWarnings("unchecked")
private <T> T lazyBean(Class<T> interfaceName) {
LazyInitTargetSource lazyTargetSource = new LazyInitTargetSource();
String[] beanNamesForType = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(this.applicationContext,
interfaceName);
if (beanNamesForType.length == 0) {
return null;
}
String beanName = getBeanName(interfaceName, beanNamesForType);
lazyTargetSource.setTargetBeanName(beanName);
lazyTargetSource.setBeanFactory(this.applicationContext);
ProxyFactoryBean proxyFactory = new ProxyFactoryBean();
proxyFactory = this.objectPostProcessor.postProcess(proxyFactory);
proxyFactory.setTargetSource(lazyTargetSource);
return (T) proxyFactory.getObject();
}
private <T> String getBeanName(Class<T> interfaceName, String[] beanNamesForType) {
if (beanNamesForType.length == 1) {
return beanNamesForType[0];
}
List<String> primaryBeanNames = getPrimaryBeanNames(beanNamesForType);
Assert.isTrue(primaryBeanNames.size() != 0, () -> "Found " + beanNamesForType.length + " beans for type "
+ interfaceName + ", but none marked as primary");
Assert.isTrue(primaryBeanNames.size() == 1,
() -> "Found " + primaryBeanNames.size() + " beans for type " + interfaceName + " marked as primary");
return primaryBeanNames.get(0);
}
private List<String> getPrimaryBeanNames(String[] beanNamesForType) {
List<String> list = new ArrayList<>();
if (!(this.applicationContext instanceof ConfigurableApplicationContext)) {
return Collections.emptyList();
}
for (String beanName : beanNamesForType) {
if (((ConfigurableApplicationContext) this.applicationContext).getBeanFactory()
.getBeanDefinition(beanName)
.isPrimary()) {
list.add(beanName);
}
}
return list;
}
private AuthenticationManager getAuthenticationManagerBean() {
return lazyBean(AuthenticationManager.class);
}
private static | AuthenticationConfiguration |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetScriptLanguageAction.java | {
"start": 1025,
"end": 1620
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(GET, "/_script_language"));
}
@Override
public String getName() {
return "script_language_action";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
return channel -> client.execute(
GetScriptLanguageAction.INSTANCE,
new GetScriptLanguageRequest(),
new RestToXContentListener<>(channel)
);
}
}
| RestGetScriptLanguageAction |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/abilities/sink/WritingMetadataSpec.java | {
"start": 1928,
"end": 3708
} | class ____ implements SinkAbilitySpec {
public static final String FIELD_NAME_METADATA_KEYS = "metadataKeys";
public static final String FIELD_NAME_CONSUMED_TYPE = "consumedType";
@JsonProperty(FIELD_NAME_METADATA_KEYS)
private final List<String> metadataKeys;
@JsonProperty(FIELD_NAME_CONSUMED_TYPE)
private final LogicalType consumedType;
@JsonCreator
public WritingMetadataSpec(
@JsonProperty(FIELD_NAME_METADATA_KEYS) List<String> metadataKeys,
@JsonProperty(FIELD_NAME_CONSUMED_TYPE) LogicalType consumedType) {
this.metadataKeys = metadataKeys;
this.consumedType = consumedType;
}
@Override
public void apply(DynamicTableSink tableSink) {
if (tableSink instanceof SupportsWritingMetadata) {
DataType consumedDataType = TypeConversions.fromLogicalToDataType(consumedType);
((SupportsWritingMetadata) tableSink)
.applyWritableMetadata(metadataKeys, consumedDataType);
} else {
throw new TableException(
String.format(
"%s does not support SupportsWritingMetadata.",
tableSink.getClass().getName()));
}
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
WritingMetadataSpec that = (WritingMetadataSpec) o;
return Objects.equals(metadataKeys, that.metadataKeys)
&& Objects.equals(consumedType, that.consumedType);
}
@Override
public int hashCode() {
return Objects.hash(metadataKeys, consumedType);
}
}
| WritingMetadataSpec |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java | {
"start": 4516,
"end": 55713
} | class ____ extends ESAllocationTestCase {
private ProjectId projectId;
@Override
public void setUp() throws Exception {
super.setUp();
projectId = randomProjectIdOrDefault();
}
public void testMoveShardCommand() {
AllocationService allocation = createAllocationService(
Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()
);
logger.info("creating an index with 1 shard, no replica");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(0))
)
.build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject(projectId).index("test"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
logger.info("adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
logger.info("start primary shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
logger.info("move the shard");
String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String toNodeId;
if ("node1".equals(existingNodeId)) {
toNodeId = "node2";
} else {
toNodeId = "node1";
}
ClusterState newState = allocation.reroute(
clusterState,
new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state(), equalTo(ShardRoutingState.RELOCATING));
assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
logger.info("finish moving the shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node(existingNodeId).isEmpty(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node(toNodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
}
private AbstractAllocateAllocationCommand randomAllocateCommand(String index, int shardId, String node) {
return randomFrom(
new AllocateReplicaAllocationCommand(index, shardId, node, projectId),
new AllocateEmptyPrimaryAllocationCommand(index, shardId, node, true, projectId),
new AllocateStalePrimaryAllocationCommand(index, shardId, node, true, projectId)
);
}
public void testAllocateCommand() {
AllocationService allocation = createAllocationService(
Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
.build()
);
final String index = "test";
logger.info("--> building initial routing table");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(
IndexMetadata.builder(index)
.settings(settings(IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(1)
.putInSyncAllocationIds(0, Collections.singleton("asdf"))
.putInSyncAllocationIds(1, Collections.singleton("qwertz"))
)
)
.build();
// shard routing is added as "from recovery" instead of "new index creation" so that we can test below that allocating an empty
// primary with accept_data_loss flag set to false fails
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsRecovery(metadata.getProject(projectId).index(index))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
final ShardId shardId = new ShardId(metadata.getProject(projectId).index(index).getIndex(), 0);
logger.info("--> adding 3 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(
DiscoveryNodes.builder()
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
.add(newNode("node4", singleton(DiscoveryNodeRole.MASTER_ROLE)))
)
.build();
clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(0));
logger.info("--> allocating to non-existent node, should fail");
try {
allocation.reroute(
clusterState,
new AllocationCommands(randomAllocateCommand(index, shardId.id(), "node42")),
false,
false,
false,
ActionListener.noop()
);
fail("expected IllegalArgumentException when allocating to non-existing node");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("failed to resolve [node42], no matching nodes"));
}
logger.info("--> allocating to non-data node, should fail");
try {
allocation.reroute(
clusterState,
new AllocationCommands(randomAllocateCommand(index, shardId.id(), "node4")),
false,
false,
false,
ActionListener.noop()
);
fail("expected IllegalArgumentException when allocating to non-data node");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("allocation can only be done on data nodes"));
}
logger.info("--> allocating non-existing shard, should fail");
try {
allocation.reroute(
clusterState,
new AllocationCommands(randomAllocateCommand("test", 1, "node2")),
false,
false,
false,
ActionListener.noop()
);
fail("expected ShardNotFoundException when allocating non-existing shard");
} catch (ShardNotFoundException e) {
assertThat(e.getMessage(), containsString("no such shard"));
}
logger.info("--> allocating non-existing index, should fail");
try {
allocation.reroute(
clusterState,
new AllocationCommands(randomAllocateCommand("test2", 0, "node2")),
false,
false,
false,
ActionListener.noop()
);
fail("expected ShardNotFoundException when allocating non-existing index");
} catch (IndexNotFoundException e) {
assertThat(e.getMessage(), containsString("no such index [test2]"));
}
logger.info("--> allocating empty primary with acceptDataLoss flag set to false");
try {
allocation.reroute(
clusterState,
new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", false, projectId)),
false,
false,
false,
ActionListener.noop()
);
fail("expected IllegalArgumentException when allocating empty primary with acceptDataLoss flag set to false");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
containsString(
"allocating an empty primary for "
+ shardId
+ " can result in data loss. Please confirm by setting the accept_data_loss parameter to true"
)
);
}
logger.info("--> allocating stale primary with acceptDataLoss flag set to false");
try {
allocation.reroute(
clusterState,
new AllocationCommands(new AllocateStalePrimaryAllocationCommand(index, shardId.id(), "node1", false, projectId)),
false,
false,
false,
ActionListener.noop()
);
fail("expected IllegalArgumentException when allocating stale primary with acceptDataLoss flag set to false");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
containsString(
"allocating an empty primary for "
+ shardId
+ " can result in data loss. Please confirm by setting the accept_data_loss parameter to true"
)
);
}
logger.info("--> allocating empty primary with acceptDataLoss flag set to true");
ClusterState newState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
logger.info("--> start the primary shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
logger.info("--> allocate the replica shard on the primary shard node, should fail");
try {
allocation.reroute(
clusterState,
new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node1", projectId)),
false,
false,
false,
ActionListener.noop()
);
fail("expected IllegalArgumentException when allocating replica shard on the primary shard node");
} catch (IllegalArgumentException e) {}
logger.info("--> allocate the replica shard on the second node");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2", projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("--> start the replica shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("--> verify that we fail when there are no unassigned shards");
try {
allocation.reroute(
clusterState,
new AllocationCommands(randomAllocateCommand("test", 0, "node3")),
false,
false,
false,
ActionListener.noop()
);
fail("expected IllegalArgumentException when allocating shard while no unassigned shard available");
} catch (IllegalArgumentException e) {}
}
public void testAllocateStalePrimaryCommand() {
AllocationService allocation = createAllocationService(
Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
.build()
);
final String index = "test";
logger.info("--> building initial routing table");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(
IndexMetadata.builder(index)
.settings(settings(IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(1)
.putInSyncAllocationIds(0, Collections.singleton("asdf"))
.putInSyncAllocationIds(1, Collections.singleton("qwertz"))
)
)
.build();
// shard routing is added as "from recovery" instead of "new index creation" so that we can test below that allocating an empty
// primary with accept_data_loss flag set to false fails
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsRecovery(metadata.getProject(projectId).index(index))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
final String node1 = "node1";
final String node2 = "node2";
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode(node1)).add(newNode(node2))).build();
clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
// mark all shards as stale
final List<ShardRouting> shardRoutings = shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED);
assertThat(shardRoutings, hasSize(2));
logger.info("--> allocating empty primary with acceptDataLoss flag set to true");
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateStalePrimaryAllocationCommand(index, 0, node1, true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
RoutingNode routingNode1 = clusterState.getRoutingNodes().node(node1);
assertThat(routingNode1.size(), equalTo(1));
assertThat(routingNode1.numberOfShardsWithState(INITIALIZING), equalTo(1));
Set<String> inSyncAllocationIds = clusterState.metadata().getProject(projectId).index(index).inSyncAllocationIds(0);
assertThat(inSyncAllocationIds, equalTo(Collections.singleton(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID)));
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
routingNode1 = clusterState.getRoutingNodes().node(node1);
assertThat(routingNode1.size(), equalTo(1));
assertThat(routingNode1.numberOfShardsWithState(STARTED), equalTo(1));
inSyncAllocationIds = clusterState.metadata().getProject(projectId).index(index).inSyncAllocationIds(0);
assertThat(inSyncAllocationIds, hasSize(1));
assertThat(inSyncAllocationIds, not(Collections.singleton(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID)));
}
public void testCancelCommand() {
AllocationService allocation = createAllocationService(
Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
.build()
);
logger.info("--> building initial routing table");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1))
)
.build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject(projectId).index("test"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
logger.info("--> adding 3 nodes");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")))
.build();
clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(0));
logger.info("--> allocating empty primary shard with accept_data_loss flag set to true");
ClusterState newState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
logger.info("--> cancel primary allocation, make sure it fails...");
try {
allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", false, projectId)),
false,
false,
false,
ActionListener.noop()
);
fail();
} catch (IllegalArgumentException e) {}
logger.info("--> start the primary shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
logger.info("--> cancel primary allocation, make sure it fails...");
try {
allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", false, projectId)),
false,
false,
false,
ActionListener.noop()
);
fail();
} catch (IllegalArgumentException e) {}
logger.info("--> allocate the replica shard on on the second node");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2", projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("--> cancel the relocation allocation");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
logger.info("--> allocate the replica shard on on the second node");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2", projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("--> cancel the primary being replicated, make sure it fails");
try {
allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", false, projectId)),
false,
false,
false,
ActionListener.noop()
);
fail();
} catch (IllegalArgumentException e) {}
logger.info("--> start the replica shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("--> cancel allocation of the replica shard");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
logger.info("--> allocate the replica shard on on the second node");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2", projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("--> start the replica shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("--> move the replica shard");
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3", projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(RELOCATING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
if (randomBoolean()) {
logger.info("--> cancel the primary allocation (with allow_primary set to true)");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).findFirst().get().primary(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0));
} else {
logger.info("--> cancel the move of the replica shard");
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node3", false, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("--> move the replica shard again");
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3", projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(RELOCATING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
logger.info("--> cancel the source replica shard");
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node2", false, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
assertThat(
clusterState.getRoutingNodes().node("node3").shardsWithState(INITIALIZING).findFirst().get().relocatingNodeId(),
nullValue()
);
logger.info("--> start the former target replica shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").numberOfShardsWithState(STARTED), equalTo(1));
logger.info("--> cancel the primary allocation (with allow_primary set to true)");
newState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand("test", 0, "node1", true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
assertThat(newState, not(equalTo(clusterState)));
clusterState = newState;
assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState(STARTED).findFirst().get().primary(), equalTo(true));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
}
}
public void testCanceledShardIsInitializedRespectingAllocationDeciders() {
var allocationId1 = AllocationId.newInitializing(UUIDs.randomBase64UUID());
var allocationId2 = AllocationId.newInitializing(UUIDs.randomBase64UUID());
var indexMetadata = IndexMetadata.builder("test")
.settings(indexSettings(IndexVersion.current(), 1, 1).put("index.routing.allocation.exclude._id", "node-0"))
.putInSyncAllocationIds(0, Set.of(allocationId1.getId(), allocationId2.getId()))
.build();
var shardId = new ShardId(indexMetadata.getIndex(), 0);
ShardRouting primary = shardRoutingBuilder(shardId, "node-0", true, STARTED).withAllocationId(allocationId1).build();
ShardRouting replica = shardRoutingBuilder(shardId, "node-1", false, STARTED).withAllocationId(allocationId2).build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.nodes(
DiscoveryNodes.builder()
.add(newNode("node-0", Version.V_8_10_0, IndexVersions.V_8_10_0))
.add(newNode("node-1", Version.V_8_9_0, IndexVersions.V_8_9_0))
.add(newNode("node-2", Version.V_8_9_0, IndexVersions.V_8_9_0))
)
.metadata(Metadata.builder().put(ProjectMetadata.builder(projectId).put(indexMetadata, false)))
.routingTable(
GlobalRoutingTable.builder()
.put(
projectId,
RoutingTable.builder().add(IndexRoutingTable.builder(shardId.getIndex()).addShard(primary).addShard(replica))
)
.build()
)
.build();
var allocation = createAllocationService();
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
if (allocation.shardsAllocator instanceof DesiredBalanceShardsAllocator dbsa) {
// ShardAssignment still contains `node-0` even though `can_remain_decision=no` for it
assertThat(dbsa.getDesiredBalance().getAssignment(shardId), equalTo(new ShardAssignment(Set.of("node-0", "node-1"), 2, 0, 0)));
}
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new CancelAllocationCommand(shardId.getIndexName(), 0, "node-0", true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
assertThat(clusterState.getRoutingNodes().node("node-0").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node-1").numberOfShardsWithState(STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node-2").numberOfShardsWithState(STARTED), equalTo(1));
if (allocation.shardsAllocator instanceof DesiredBalanceShardsAllocator dbsa) {
assertThat(dbsa.getDesiredBalance().getAssignment(shardId), equalTo(new ShardAssignment(Set.of("node-1", "node-2"), 2, 0, 0)));
}
}
public void testSerialization() throws Exception {
AllocationCommands commands = new AllocationCommands(
new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true, projectId),
new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true, projectId),
new AllocateReplicaAllocationCommand("test", 2, "node1", projectId),
new MoveAllocationCommand("test", 3, "node2", "node3", projectId),
new CancelAllocationCommand("test", 4, "node5", true, projectId)
);
BytesStreamOutput bytes = new BytesStreamOutput();
AllocationCommands.writeTo(commands, bytes);
StreamInput in = bytes.bytes().streamInput();
// Since the commands are named writeable we need to register them and wrap the input stream
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
// Now we can read them!
assertThat(AllocationCommands.readFrom(in), equalTo(commands));
}
public void testXContent() throws Exception {
String commands = """
{
"commands": [
{
"allocate_empty_primary": {
"index": "test",
"shard": 1,
"node": "node1",
"accept_data_loss": true
}
},
{
"allocate_stale_primary": {
"index": "test",
"shard": 2,
"node": "node1",
"accept_data_loss": true
}
},
{
"allocate_replica": {
"index": "test",
"shard": 2,
"node": "node1"
}
},
{
"move": {
"index": "test",
"shard": 3,
"from_node": "node2",
"to_node": "node3"
}
},
{
"cancel": {
"index": "test",
"shard": 4,
"node": "node5",
"allow_primary": true
}
}
]
}
""";
try (XContentParser parser = createParser(JsonXContent.jsonXContent, commands)) {
// move two tokens, parser expected to be "on" `commands` field
parser.nextToken();
parser.nextToken();
assertThat(
AllocationCommands.fromXContent(parser, projectId),
equalTo(
new AllocationCommands(
new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true, projectId),
new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true, projectId),
new AllocateReplicaAllocationCommand("test", 2, "node1", projectId),
new MoveAllocationCommand("test", 3, "node2", "node3", projectId),
new CancelAllocationCommand("test", 4, "node5", true, projectId)
)
)
);
}
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(NetworkModule.getNamedXContents());
}
public void testMoveShardToNonDataNode() {
AllocationService allocation = createAllocationService(
Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()
);
logger.info("creating an index with 1 shard, no replica");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(0))
)
.build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject(projectId).index("test"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
logger.info("--> adding two nodes");
DiscoveryNode node1 = DiscoveryNodeUtils.builder("node1")
.name("node1")
.ephemeralId("node1")
.address("test1", "test1", buildNewFakeTransportAddress())
.roles(MASTER_DATA_ROLES)
.build();
DiscoveryNode node2 = DiscoveryNodeUtils.builder("node2")
.name("node2")
.ephemeralId("node2")
.address("test2", "test2", buildNewFakeTransportAddress())
.roles(new HashSet<>(randomSubsetOf(Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.INGEST_ROLE))))
.build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node1).add(node2)).build();
logger.info("start primary shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
Index index = clusterState.getMetadata().getProject(projectId).index("test").getIndex();
MoveAllocationCommand command = new MoveAllocationCommand(index.getName(), 0, "node1", "node2", projectId);
RoutingAllocation routingAllocation = new RoutingAllocation(
new AllocationDeciders(Collections.emptyList()),
clusterState.mutableRoutingNodes(),
clusterState,
ClusterInfo.EMPTY,
SnapshotShardSizeInfo.EMPTY,
System.nanoTime()
);
logger.info("--> executing move allocation command to non-data node");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> command.execute(routingAllocation, false));
assertEquals(
"[move_allocation] can't move [test][0] from "
+ node1
+ " to "
+ node2
+ ": source ["
+ node2.getName()
+ "] is not a data node.",
e.getMessage()
);
}
public void testMoveShardFromNonDataNode() {
AllocationService allocation = createAllocationService(
Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()
);
logger.info("creating an index with 1 shard, no replica");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(0))
)
.build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsNew(metadata.getProject(projectId).index("test"))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
logger.info("--> adding two nodes");
DiscoveryNode node1 = DiscoveryNodeUtils.builder("node1")
.name("node1")
.ephemeralId("node1")
.address("test1", "test1", buildNewFakeTransportAddress())
.roles(MASTER_DATA_ROLES)
.build();
DiscoveryNode node2 = DiscoveryNodeUtils.builder("node2")
.name("node2")
.ephemeralId("node2")
.address("test2", "test2", buildNewFakeTransportAddress())
.roles(new HashSet<>(randomSubsetOf(Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.INGEST_ROLE))))
.build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node1).add(node2)).build();
logger.info("start primary shard");
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
Index index = clusterState.getMetadata().getProject(projectId).index("test").getIndex();
MoveAllocationCommand command = new MoveAllocationCommand(index.getName(), 0, "node2", "node1", projectId);
RoutingAllocation routingAllocation = new RoutingAllocation(
new AllocationDeciders(Collections.emptyList()),
clusterState.mutableRoutingNodes(),
clusterState,
ClusterInfo.EMPTY,
SnapshotShardSizeInfo.EMPTY,
System.nanoTime()
);
logger.info("--> executing move allocation command from non-data node");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> command.execute(routingAllocation, false));
assertEquals(
"[move_allocation] can't move [test][0] from "
+ node2
+ " to "
+ node1
+ ": source ["
+ node2.getName()
+ "] is not a data node.",
e.getMessage()
);
}
public void testConflictingCommandsInSingleRequest() {
AllocationService allocation = createAllocationService(
Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
.build()
);
final String index1 = "test1";
final String index2 = "test2";
final String index3 = "test3";
logger.info("--> building initial routing table");
Metadata metadata = Metadata.builder()
.put(
ProjectMetadata.builder(projectId)
.put(
IndexMetadata.builder(index1)
.settings(settings(IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(1)
.putInSyncAllocationIds(0, singleton("randomAllocID"))
.putInSyncAllocationIds(1, singleton("randomAllocID2"))
)
.put(
IndexMetadata.builder(index2)
.settings(settings(IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(1)
.putInSyncAllocationIds(0, singleton("randomAllocID"))
.putInSyncAllocationIds(1, singleton("randomAllocID2"))
)
.put(
IndexMetadata.builder(index3)
.settings(settings(IndexVersion.current()))
.numberOfShards(1)
.numberOfReplicas(1)
.putInSyncAllocationIds(0, singleton("randomAllocID"))
.putInSyncAllocationIds(1, singleton("randomAllocID2"))
)
)
.build();
RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
.addAsRecovery(metadata.getProject(projectId).index(index1))
.addAsRecovery(metadata.getProject(projectId).index(index2))
.addAsRecovery(metadata.getProject(projectId).index(index3))
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.routingTable(GlobalRoutingTable.builder().put(projectId, routingTable).build())
.build();
final String node1 = "node1";
final String node2 = "node2";
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode(node1)).add(newNode(node2))).build();
final ClusterState finalClusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
logger.info("--> allocating same index primary in multiple commands should fail");
assertThat(expectThrows(IllegalArgumentException.class, () -> {
allocation.reroute(
finalClusterState,
new AllocationCommands(
new AllocateStalePrimaryAllocationCommand(index1, 0, node1, true, projectId),
new AllocateStalePrimaryAllocationCommand(index1, 0, node2, true, projectId)
),
false,
false,
false,
ActionListener.noop()
);
}).getMessage(), containsString("primary [" + index1 + "][0] is already assigned"));
assertThat(expectThrows(IllegalArgumentException.class, () -> {
allocation.reroute(
finalClusterState,
new AllocationCommands(
new AllocateEmptyPrimaryAllocationCommand(index2, 0, node1, true, projectId),
new AllocateEmptyPrimaryAllocationCommand(index2, 0, node2, true, projectId)
),
false,
false,
false,
ActionListener.noop()
);
}).getMessage(), containsString("primary [" + index2 + "][0] is already assigned"));
clusterState = allocation.reroute(
clusterState,
new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(index3, 0, node1, true, projectId)),
false,
false,
false,
ActionListener.noop()
).clusterState();
clusterState = startInitializingShardsAndReroute(allocation, clusterState);
final ClusterState updatedClusterState = clusterState;
assertThat(updatedClusterState.getRoutingNodes().node(node1).numberOfShardsWithState(STARTED), equalTo(1));
logger.info("--> subsequent replica allocation fails as all configured replicas have been allocated");
assertThat(expectThrows(IllegalArgumentException.class, () -> {
allocation.reroute(
updatedClusterState,
new AllocationCommands(
new AllocateReplicaAllocationCommand(index3, 0, node2, projectId),
new AllocateReplicaAllocationCommand(index3, 0, node2, projectId)
),
false,
false,
false,
ActionListener.noop()
);
}).getMessage(), containsString("all copies of [" + index3 + "][0] are already assigned. Use the move allocation command instead"));
}
}
| AllocationCommandsTests |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest124.java | {
"start": 925,
"end": 3711
} | class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "SELECT\n" +
"J01.COL_A,\n" +
"J01.COL_B,\n" +
"\"SUM\"(J01.COL_C) OVER (\n" +
"PARTITION BY J01.COL_A ORDER BY J01.COL_B NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW\n" +
") AS COL_C\n" +
"FROM\n" +
"TAB_A J01";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
assertEquals(1, statementList.size());
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals("SELECT J01.COL_A, J01.COL_B, \"SUM\"(J01.COL_C) OVER (PARTITION BY J01.COL_A ORDER BY J01.COL_B NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS COL_C\n" +
"FROM TAB_A J01", stmt.toString());
}
public void test_1() throws Exception {
String sql = "SELECT\n" +
"J01.COL_A,\n" +
"J01.COL_B,\n" +
"\"SUM\"(J01.COL_C) OVER (\n" +
"PARTITION BY J01.COL_A ORDER BY J01.COL_B NULLS FIRST ROWS BETWEEN 5 PRECEDING AND 8 FOLLOWING\n" +
") AS COL_C\n" +
"FROM\n" +
"TAB_A J01";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
assertEquals(1, statementList.size());
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
System.out.println(stmt.toString());
assertEquals("SELECT J01.COL_A, J01.COL_B, \"SUM\"(J01.COL_C) OVER (PARTITION BY J01.COL_A ORDER BY J01.COL_B NULLS FIRST ROWS BETWEEN 5 PRECEDING AND 8 FOLLOWING) AS COL_C\n" +
"FROM TAB_A J01", stmt.toString());
}
public void test_2() throws Exception {
String sql = "SELECT\n" +
"J01.COL_A,\n" +
"J01.COL_B,\n" +
"\"SUM\"(J01.COL_C) OVER (\n" +
"PARTITION BY J01.COL_A ORDER BY J01.COL_B NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING\n" +
") AS COL_C\n" +
"FROM\n" +
"TAB_A J01";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
assertEquals(1, statementList.size());
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
System.out.println(stmt.toString());
assertEquals("SELECT J01.COL_A, J01.COL_B, \"SUM\"(J01.COL_C) OVER (PARTITION BY J01.COL_A ORDER BY J01.COL_B NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS COL_C\n" +
"FROM TAB_A J01", stmt.toString());
}
}
| OracleSelectTest124 |
java | quarkusio__quarkus | integration-tests/logging-min-level-set/src/test/java/io/quarkus/it/logging/minlevel/set/LoggingMinLevelByDefaultTest.java | {
"start": 577,
"end": 1038
} | class ____ {
@Test
public void testDebug() {
given()
.when().get("/log/bydefault/debug")
.then()
.statusCode(200)
.body(is("true"));
}
@Test
public void testNotTrace() {
given()
.when().get("/log/bydefault/not-trace")
.then()
.statusCode(200)
.body(is("true"));
}
}
| LoggingMinLevelByDefaultTest |
java | google__dagger | javatests/dagger/internal/codegen/DelegateRequestRepresentationTest.java | {
"start": 15016,
"end": 15436
} | interface ____ {",
" @Binds Supertype to(Subtype subtype);",
"}");
Source component =
CompilerTests.javaSource(
"test.RequestsSubtypeAsProvider",
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"",
"@Component(modules = other.TestModule.class)",
" | TestModule |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/OpenSslX509KeyManagerFactory.java | {
"start": 2490,
"end": 4149
} | class ____ extends KeyManagerFactory {
private final OpenSslKeyManagerFactorySpi spi;
public OpenSslX509KeyManagerFactory() {
this(newOpenSslKeyManagerFactorySpi(null));
}
public OpenSslX509KeyManagerFactory(Provider provider) {
this(newOpenSslKeyManagerFactorySpi(provider));
}
public OpenSslX509KeyManagerFactory(String algorithm, Provider provider) throws NoSuchAlgorithmException {
this(newOpenSslKeyManagerFactorySpi(algorithm, provider));
}
private OpenSslX509KeyManagerFactory(OpenSslKeyManagerFactorySpi spi) {
super(spi, spi.kmf.getProvider(), spi.kmf.getAlgorithm());
this.spi = spi;
}
private static OpenSslKeyManagerFactorySpi newOpenSslKeyManagerFactorySpi(Provider provider) {
try {
return newOpenSslKeyManagerFactorySpi(null, provider);
} catch (NoSuchAlgorithmException e) {
// This should never happen as we use the default algorithm.
throw new IllegalStateException(e);
}
}
private static OpenSslKeyManagerFactorySpi newOpenSslKeyManagerFactorySpi(String algorithm, Provider provider)
throws NoSuchAlgorithmException {
if (algorithm == null) {
algorithm = KeyManagerFactory.getDefaultAlgorithm();
}
return new OpenSslKeyManagerFactorySpi(
provider == null ? KeyManagerFactory.getInstance(algorithm) :
KeyManagerFactory.getInstance(algorithm, provider));
}
OpenSslKeyMaterialProvider newProvider() {
return spi.newProvider();
}
private static final | OpenSslX509KeyManagerFactory |
java | google__guava | guava-tests/test/com/google/common/collect/ImmutableSortedMultisetTest.java | {
"start": 10696,
"end": 20497
} | class ____ implements Iterable<String> {
int count = 0;
@Override
public Iterator<String> iterator() {
count++;
return asList("a", "b", "a").iterator();
}
}
public void testCopyOf_plainIterable() {
CountingIterable iterable = new CountingIterable();
Multiset<String> multiset = ImmutableSortedMultiset.copyOf(iterable);
assertEquals(HashMultiset.create(asList("a", "b", "a")), multiset);
assertEquals(1, iterable.count);
}
public void testCopyOf_shortcut_empty() {
Collection<String> c = ImmutableSortedMultiset.of();
assertSame(c, ImmutableSortedMultiset.copyOf(c));
}
public void testCopyOf_shortcut_singleton() {
Collection<String> c = ImmutableSortedMultiset.of("a");
assertSame(c, ImmutableSortedMultiset.copyOf(c));
}
public void testCopyOf_shortcut_immutableMultiset() {
Collection<String> c = ImmutableSortedMultiset.of("a", "b", "c");
assertSame(c, ImmutableSortedMultiset.copyOf(c));
}
public void testForEachEntry() {
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder().add("a").add("b").add("a").add("c").build();
List<Multiset.Entry<String>> entries = new ArrayList<>();
multiset.forEachEntry((e, c) -> entries.add(Multisets.immutableEntry(e, c)));
assertThat(entries)
.containsExactly(
Multisets.immutableEntry("a", 2),
Multisets.immutableEntry("b", 1),
Multisets.immutableEntry("c", 1))
.inOrder();
}
public void testBuilderAdd() {
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder().add("a").add("b").add("a").add("c").build();
assertEquals(HashMultiset.create(asList("a", "b", "a", "c")), multiset);
}
public void testBuilderAddAll() {
List<String> a = asList("a", "b");
List<String> b = asList("c", "d");
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder().addAll(a).addAll(b).build();
assertEquals(HashMultiset.create(asList("a", "b", "c", "d")), multiset);
}
public void testBuilderAddAllMultiset() {
Multiset<String> a = HashMultiset.create(asList("a", "b", "b"));
Multiset<String> b = HashMultiset.create(asList("c", "b"));
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder().addAll(a).addAll(b).build();
assertEquals(HashMultiset.create(asList("a", "b", "b", "b", "c")), multiset);
}
public void testBuilderAddAllIterator() {
Iterator<String> iterator = asList("a", "b", "a", "c").iterator();
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder().addAll(iterator).build();
assertEquals(HashMultiset.create(asList("a", "b", "a", "c")), multiset);
}
public void testBuilderAddCopies() {
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder()
.addCopies("a", 2)
.addCopies("b", 3)
.addCopies("c", 0)
.build();
assertEquals(HashMultiset.create(asList("a", "a", "b", "b", "b")), multiset);
}
public void testBuilderSetCount() {
ImmutableSortedMultiset<String> multiset =
ImmutableSortedMultiset.<String>naturalOrder()
.add("a")
.setCount("a", 2)
.setCount("b", 3)
.build();
assertEquals(HashMultiset.create(asList("a", "a", "b", "b", "b")), multiset);
}
public void testBuilderAddHandlesNullsCorrectly() {
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
assertThrows(NullPointerException.class, () -> builder.add((String) null));
}
public void testBuilderAddAllHandlesNullsCorrectly() {
{
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
assertThrows(NullPointerException.class, () -> builder.addAll((Collection<String>) null));
}
{
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
List<String> listWithNulls = asList("a", null, "b");
assertThrows(NullPointerException.class, () -> builder.addAll(listWithNulls));
}
{
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
Multiset<String> multisetWithNull = LinkedHashMultiset.create(asList("a", null, "b"));
assertThrows(NullPointerException.class, () -> builder.addAll(multisetWithNull));
}
}
public void testBuilderAddCopiesHandlesNullsCorrectly() {
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
assertThrows(NullPointerException.class, () -> builder.addCopies(null, 2));
}
public void testBuilderAddCopiesIllegal() {
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
assertThrows(IllegalArgumentException.class, () -> builder.addCopies("a", -2));
}
public void testBuilderSetCountHandlesNullsCorrectly() {
ImmutableSortedMultiset.Builder<String> builder =
new ImmutableSortedMultiset.Builder<>(Ordering.natural().nullsFirst());
assertThrows(NullPointerException.class, () -> builder.setCount(null, 2));
}
public void testBuilderSetCountIllegal() {
ImmutableSortedMultiset.Builder<String> builder = ImmutableSortedMultiset.naturalOrder();
assertThrows(IllegalArgumentException.class, () -> builder.setCount("a", -2));
}
public void testToImmutableSortedMultiset() {
BiPredicate<ImmutableSortedMultiset<String>, ImmutableSortedMultiset<String>> equivalence =
(ms1, ms2) ->
ms1.equals(ms2)
&& ms1.entrySet().asList().equals(ms2.entrySet().asList())
&& ms1.comparator().equals(ms2.comparator());
CollectorTester.of(
ImmutableSortedMultiset.<String>toImmutableSortedMultiset(
String.CASE_INSENSITIVE_ORDER),
equivalence)
.expectCollects(ImmutableSortedMultiset.emptyMultiset(String.CASE_INSENSITIVE_ORDER))
.expectCollects(
ImmutableSortedMultiset.orderedBy(String.CASE_INSENSITIVE_ORDER)
.addCopies("a", 2)
.addCopies("b", 1)
.addCopies("c", 3)
.build(),
"a",
"c",
"b",
"c",
"A",
"C");
}
public void testToImmutableSortedMultisetCountFunction() {
BiPredicate<ImmutableSortedMultiset<String>, ImmutableSortedMultiset<String>> equivalence =
(ms1, ms2) ->
ms1.equals(ms2)
&& ms1.entrySet().asList().equals(ms2.entrySet().asList())
&& ms1.comparator().equals(ms2.comparator());
CollectorTester.of(
ImmutableSortedMultiset.<String, String>toImmutableSortedMultiset(
String.CASE_INSENSITIVE_ORDER, e -> e, e -> 1),
equivalence)
.expectCollects(ImmutableSortedMultiset.emptyMultiset(String.CASE_INSENSITIVE_ORDER))
.expectCollects(
ImmutableSortedMultiset.orderedBy(String.CASE_INSENSITIVE_ORDER)
.addCopies("a", 2)
.addCopies("b", 1)
.addCopies("c", 3)
.build(),
"a",
"c",
"b",
"c",
"A",
"C");
}
public void testNullPointers() {
new NullPointerTester().testAllPublicStaticMethods(ImmutableSortedMultiset.class);
}
public void testSerialization_empty() {
Collection<String> c = ImmutableSortedMultiset.of();
assertSame(c, SerializableTester.reserialize(c));
}
public void testSerialization_multiple() {
Collection<String> c = ImmutableSortedMultiset.of("a", "b", "a");
Collection<String> copy = SerializableTester.reserializeAndAssert(c);
assertThat(copy).containsExactly("a", "a", "b").inOrder();
}
public void testSerialization_elementSet() {
Multiset<String> c = ImmutableSortedMultiset.of("a", "b", "a");
Collection<String> copy = SerializableTester.reserializeAndAssert(c.elementSet());
assertThat(copy).containsExactly("a", "b").inOrder();
}
public void testSerialization_entrySet() {
Multiset<String> c = ImmutableSortedMultiset.of("a", "b", "c");
SerializableTester.reserializeAndAssert(c.entrySet());
}
public void testEquals_immutableMultiset() {
Collection<String> c = ImmutableSortedMultiset.of("a", "b", "a");
assertEquals(c, ImmutableSortedMultiset.of("a", "b", "a"));
assertEquals(c, ImmutableSortedMultiset.of("a", "a", "b"));
assertThat(c).isNotEqualTo(ImmutableSortedMultiset.of("a", "b"));
assertThat(c).isNotEqualTo(ImmutableSortedMultiset.of("a", "b", "c", "d"));
}
public void testIterationOrder() {
Collection<String> c = ImmutableSortedMultiset.of("a", "b", "a");
assertThat(c).containsExactly("a", "a", "b").inOrder();
}
public void testMultisetWrites() {
Multiset<String> multiset = ImmutableSortedMultiset.of("a", "b", "a");
UnmodifiableCollectionTests.assertMultisetIsUnmodifiable(multiset, "test");
}
public void testAsList() {
ImmutableSortedMultiset<String> multiset = ImmutableSortedMultiset.of("a", "a", "b", "b", "b");
ImmutableList<String> list = multiset.asList();
assertEquals(ImmutableList.of("a", "a", "b", "b", "b"), list);
SerializableTester.reserializeAndAssert(list);
assertEquals(2, list.indexOf("b"));
assertEquals(4, list.lastIndexOf("b"));
}
public void testCopyOfDefensiveCopy() {
// Depending on JDK version, either toArray() or toArray(T[]) may be called... use this class
// rather than mocking to ensure that one of those methods is called.
| CountingIterable |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableHideTest.java | {
"start": 1071,
"end": 2529
} | class ____ extends RxJavaTest {
@Test
public void hiding() {
PublishSubject<Integer> src = PublishSubject.create();
Observable<Integer> dst = src.hide();
assertFalse(dst instanceof PublishSubject);
Observer<Object> o = TestHelper.mockObserver();
dst.subscribe(o);
src.onNext(1);
src.onComplete();
verify(o).onNext(1);
verify(o).onComplete();
verify(o, never()).onError(any(Throwable.class));
}
@Test
public void hidingError() {
PublishSubject<Integer> src = PublishSubject.create();
Observable<Integer> dst = src.hide();
assertFalse(dst instanceof PublishSubject);
Observer<Object> o = TestHelper.mockObserver();
dst.subscribe(o);
src.onError(new TestException());
verify(o, never()).onNext(any());
verify(o, never()).onComplete();
verify(o).onError(any(TestException.class));
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeObservable(new Function<Observable<Object>, ObservableSource<Object>>() {
@Override
public ObservableSource<Object> apply(Observable<Object> o)
throws Exception {
return o.hide();
}
});
}
@Test
public void disposed() {
TestHelper.checkDisposed(PublishSubject.create().hide());
}
}
| ObservableHideTest |
java | elastic__elasticsearch | modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRetryingInputStreamTests.java | {
"start": 1827,
"end": 8336
} | class ____ extends ESTestCase {
private static final String BUCKET_NAME = "test-bucket";
private static final String BLOB_NAME = "test-blob";
private final BlobId blobId = BlobId.of(BUCKET_NAME, BLOB_NAME);
private com.google.api.services.storage.Storage storageRpc;
private com.google.cloud.storage.Storage storage;
private com.google.api.services.storage.Storage.Objects.Get get;
private MeteredStorage meteredStorage;
@Before
public void init() throws IOException {
storageRpc = mock(com.google.api.services.storage.Storage.class);
com.google.api.services.storage.Storage.Objects objects = mock(com.google.api.services.storage.Storage.Objects.class);
when(storageRpc.objects()).thenReturn(objects);
get = mock(com.google.api.services.storage.Storage.Objects.Get.class);
when(objects.get(BUCKET_NAME, BLOB_NAME)).thenReturn(get);
storage = mock(com.google.cloud.storage.Storage.class);
when(storage.getOptions()).thenReturn(
StorageOptions.newBuilder()
.setProjectId("ignore")
.setRetrySettings(RetrySettings.newBuilder().setMaxAttempts(randomIntBetween(1, 3)).build())
.build()
);
meteredStorage = new MeteredStorage(storage, storageRpc, new GcsRepositoryStatsCollector());
}
public void testReadWithinBlobLength() throws IOException {
byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512));
int position = randomIntBetween(0, Math.max(0, bytes.length - 1));
int maxLength = bytes.length - position; // max length to read if length param exceeds buffer size
int length = randomIntBetween(0, Integer.MAX_VALUE - 1);
GoogleCloudStorageRetryingInputStream stream;
boolean readWithExactPositionAndLength = randomBoolean();
if (readWithExactPositionAndLength) {
stream = createRetryingInputStream(bytes, position, length);
} else {
stream = createRetryingInputStream(bytes);
}
try (stream) {
var out = new ByteArrayOutputStream();
var readLength = org.elasticsearch.core.Streams.copy(stream, out);
if (readWithExactPositionAndLength) {
assertThat(readLength, equalTo((long) Math.min(length, maxLength)));
assertArrayEquals(out.toByteArray(), Arrays.copyOfRange(bytes, position, position + Math.min(length, maxLength)));
} else {
assertThat(readLength, equalTo((long) bytes.length));
assertArrayEquals(out.toByteArray(), bytes);
}
}
}
public void testReadBeyondBlobLengthThrowsRequestedRangeNotSatisfiedException() {
byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512));
int position = bytes.length + randomIntBetween(0, 100);
int length = randomIntBetween(1, 100);
var exception = expectThrows(RequestedRangeNotSatisfiedException.class, () -> {
try (var ignored = createRetryingInputStream(bytes, position, length)) {
fail();
}
});
assertThat(exception.getResource(), equalTo(BLOB_NAME));
assertThat(exception.getPosition(), equalTo((long) position));
assertThat(exception.getLength(), equalTo((long) length));
assertThat(
exception.getMessage(),
equalTo(
String.format(
Locale.ROOT,
"Requested range [position=%d, length=%d] cannot be satisfied for [%s]",
position,
length,
BLOB_NAME
)
)
);
assertThat(exception.getCause(), instanceOf(StorageException.class));
}
private GoogleCloudStorageRetryingInputStream createRetryingInputStream(byte[] data) throws IOException {
HttpTransport transport = getMockHttpTransport(data, 0, data.length);
HttpRequest httpRequest = transport.createRequestFactory().buildGetRequest(HttpTesting.SIMPLE_GENERIC_URL);
HttpResponse httpResponse = httpRequest.execute();
when(get.executeMedia()).thenReturn(httpResponse);
return new GoogleCloudStorageRetryingInputStream(OperationPurpose.SNAPSHOT_DATA, meteredStorage, blobId);
}
private GoogleCloudStorageRetryingInputStream createRetryingInputStream(byte[] data, int position, int length) throws IOException {
if (position >= data.length) {
when(get.executeMedia()).thenThrow(
new StorageException(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus(), "Test range not satisfied")
);
} else {
HttpTransport transport = getMockHttpTransport(data, position, length);
HttpRequest httpRequest = transport.createRequestFactory().buildGetRequest(HttpTesting.SIMPLE_GENERIC_URL);
HttpResponse httpResponse = httpRequest.execute();
when(get.executeMedia()).thenReturn(httpResponse);
}
return new GoogleCloudStorageRetryingInputStream(
OperationPurpose.SNAPSHOT_DATA,
meteredStorage,
blobId,
position,
position + length - 1
);
}
private static HttpTransport getMockHttpTransport(byte[] data, int position, int length) {
InputStream content = new ByteArrayInputStream(data, position, length);
long contentLength = position + length - 1;
HttpTransport transport = new MockHttpTransport() {
@Override
public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
return new MockLowLevelHttpRequest() {
@Override
public LowLevelHttpResponse execute() throws IOException {
MockLowLevelHttpResponse result = new MockLowLevelHttpResponse();
result.setContent(content);
result.setContentLength(contentLength);
result.setContentType("application/octet-stream");
result.addHeader("x-goog-generation", String.valueOf(randomNonNegativeInt()));
result.setStatusCode(RestStatus.OK.getStatus());
return result;
}
};
}
};
return transport;
}
}
| GoogleCloudStorageRetryingInputStreamTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JpaEndpointBuilderFactory.java | {
"start": 37201,
"end": 50057
} | interface ____
extends
EndpointConsumerBuilder {
default JpaEndpointConsumerBuilder basic() {
return (JpaEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* This key/value mapping is used for building the query parameters. It
* is expected to be of the generic type java.util.Map where the keys
* are the named parameters of a given JPA query and the values are
* their corresponding effective values you want to select for. When
* it's used for producer, Simple expression can be used as a parameter
* value. It allows you to retrieve parameter values from the message
* body, header and etc. . This is a multi-value option with prefix:
* parameters.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the parameters(String,
* Object) method to add a value (call the method multiple times to set
* more values).
*
* Group: consumer (advanced)
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder parameters(String key, Object value) {
doSetMultiValueProperty("parameters", "parameters." + key, value);
return this;
}
/**
* This key/value mapping is used for building the query parameters. It
* is expected to be of the generic type java.util.Map where the keys
* are the named parameters of a given JPA query and the values are
* their corresponding effective values you want to select for. When
* it's used for producer, Simple expression can be used as a parameter
* value. It allows you to retrieve parameter values from the message
* body, header and etc. . This is a multi-value option with prefix:
* parameters.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the parameters(String,
* Object) method to add a value (call the method multiple times to set
* more values).
*
* Group: consumer (advanced)
*
* @param values the values
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder parameters(Map values) {
doSetMultiValueProperties("parameters", "parameters.", values);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option is a:
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder pollStrategy(org.apache.camel.spi.PollingConsumerPollStrategy pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option will be converted to a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder pollStrategy(String pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* Additional properties for the entity manager to use. This is a
* multi-value option with prefix: emf.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* entityManagerProperties(String, Object) method to add a value (call
* the method multiple times to set more values).
*
* Group: advanced
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder entityManagerProperties(String key, Object value) {
doSetMultiValueProperty("entityManagerProperties", "emf." + key, value);
return this;
}
/**
* Additional properties for the entity manager to use. This is a
* multi-value option with prefix: emf.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* entityManagerProperties(String, Object) method to add a value (call
* the method multiple times to set more values).
*
* Group: advanced
*
* @param values the values
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder entityManagerProperties(Map values) {
doSetMultiValueProperties("entityManagerProperties", "emf.", values);
return this;
}
/**
* Whether to use Spring's SharedEntityManager for the
* consumer/producer. Note in most cases, joinTransaction should be set
* to false as this is not an EXTENDED EntityManager.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param sharedEntityManager the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder sharedEntityManager(boolean sharedEntityManager) {
doSetProperty("sharedEntityManager", sharedEntityManager);
return this;
}
/**
* Whether to use Spring's SharedEntityManager for the
* consumer/producer. Note in most cases, joinTransaction should be set
* to false as this is not an EXTENDED EntityManager.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param sharedEntityManager the value to set
* @return the dsl builder
*/
default AdvancedJpaEndpointConsumerBuilder sharedEntityManager(String sharedEntityManager) {
doSetProperty("sharedEntityManager", sharedEntityManager);
return this;
}
}
/**
* Builder for endpoint producers for the JPA component.
*/
public | AdvancedJpaEndpointConsumerBuilder |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/ShutdownArgs.java | {
"start": 1488,
"end": 4263
} | class ____ {
/**
* Utility constructor.
*/
private Builder() {
}
/**
* Creates new {@link ShutdownArgs} and setting {@literal SAVE}.
*
* @return new {@link ShutdownArgs} with {@literal SAVE} set.
* @see ShutdownArgs#save(boolean)
*/
public static ShutdownArgs save(boolean save) {
return new ShutdownArgs().save(save);
}
/**
* Creates new {@link ShutdownArgs} and setting {@literal NOW}.
*
* @return new {@link ShutdownArgs} with {@literal NOW} set.
* @see ShutdownArgs#now()
*/
public static ShutdownArgs now() {
return new ShutdownArgs().now();
}
/**
* Creates new {@link ShutdownArgs} and setting {@literal FORCE}.
*
* @return new {@link ShutdownArgs} with {@literal FORCE} set.
* @see ShutdownArgs#force()
*/
public static ShutdownArgs force() {
return new ShutdownArgs().force();
}
/**
* Creates new {@link ShutdownArgs} and setting {@literal ABORT}.
*
* @return new {@link ShutdownArgs} with {@literal ABORT} set.
* @see ShutdownArgs#abort()
*/
public static ShutdownArgs abort() {
return new ShutdownArgs().abort();
}
}
/**
* Will force a DB saving operation even if no save points are configured.
*
* @param save {@code true} force save operation.
* @return {@code this}
*/
public ShutdownArgs save(boolean save) {
this.save = save;
return this;
}
/**
* Skips waiting for lagging replicas, i.e. it bypasses the first step in the shutdown sequence.
*
* @return {@code this}
*/
public ShutdownArgs now() {
this.now = true;
return this;
}
/**
* Ignores any errors that would normally prevent the server from exiting.
*
* @return {@code this}
*/
public ShutdownArgs force() {
this.force = true;
return this;
}
/**
* Cancels an ongoing shutdown and cannot be combined with other flags.
*
* @return {@code this}
*/
public ShutdownArgs abort() {
this.abort = true;
return this;
}
@Override
public <K, V> void build(CommandArgs<K, V> args) {
if (save) {
args.add(CommandKeyword.SAVE);
} else {
args.add(CommandKeyword.NOSAVE);
}
if (now) {
args.add("NOW");
}
if (force) {
args.add(CommandKeyword.FORCE);
}
if (abort) {
args.add("ABORT");
}
}
}
| Builder |
java | quarkusio__quarkus | extensions/jaxb/deployment/src/main/java/io/quarkus/jaxb/deployment/JaxbProcessor.java | {
"start": 4138,
"end": 26905
} | class ____ {
private static Logger LOG = Logger.getLogger(JaxbProcessor.class);
private static final List<Class<? extends Annotation>> JAXB_ANNOTATIONS = List.of(
XmlAccessorType.class,
XmlAnyAttribute.class,
XmlAnyElement.class,
XmlAttachmentRef.class,
XmlAttribute.class,
XmlElement.class,
XmlElementDecl.class,
XmlElementRef.class,
XmlElementRefs.class,
XmlElements.class,
XmlElementWrapper.class,
XmlEnum.class,
XmlEnumValue.class,
XmlID.class,
XmlIDREF.class,
XmlInlineBinaryData.class,
XmlList.class,
XmlMimeType.class,
XmlMixed.class,
XmlNs.class,
XmlRegistry.class,
XmlRootElement.class,
XmlSchema.class,
XmlSchemaType.class,
XmlSchemaTypes.class,
XmlSeeAlso.class,
XmlTransient.class,
XmlType.class,
XmlValue.class,
XmlJavaTypeAdapter.class,
XmlJavaTypeAdapters.class);
private static final List<Class<?>> JAXB_REFLECTIVE_CLASSES = List.of(XmlAccessOrder.class);
private static final DotName XML_ROOT_ELEMENT = DotName.createSimple(XmlRootElement.class.getName());
private static final DotName XML_TYPE = DotName.createSimple(XmlType.class.getName());
private static final DotName XML_REGISTRY = DotName.createSimple(XmlRegistry.class.getName());
private static final DotName XML_SCHEMA = DotName.createSimple(XmlSchema.class.getName());
private static final DotName XML_JAVA_TYPE_ADAPTER = DotName.createSimple(XmlJavaTypeAdapter.class.getName());
private static final DotName XML_ANY_ELEMENT = DotName.createSimple(XmlAnyElement.class.getName());
private static final DotName XML_SEE_ALSO = DotName.createSimple(XmlSeeAlso.class.getName());
private static final DotName XML_TRANSIENT = DotName.createSimple(XmlTransient.class.getName());
private static final DotName XML_ACCESSOR_TYPE = DotName.createSimple(XmlAccessorType.class.getName());
private static final List<DotName> JAXB_ROOT_ANNOTATIONS = List.of(XML_ROOT_ELEMENT, XML_TYPE, XML_REGISTRY);
private static final List<DotName> IGNORE_TYPES = List.of(DotName.createSimple("javax.xml.datatype.XMLGregorianCalendar"));
private static final List<String> NATIVE_PROXY_DEFINITIONS = List.of(
"org.glassfish.jaxb.core.marshaller.CharacterEscapeHandler",
"com.sun.xml.txw2.output.CharacterEscapeHandler",
"org.glassfish.jaxb.core.v2.schemagen.episode.Bindings",
"org.glassfish.jaxb.core.v2.schemagen.episode.SchemaBindings",
"org.glassfish.jaxb.core.v2.schemagen.episode.Klass",
"org.glassfish.jaxb.core.v2.schemagen.episode.Package",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Annotated",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Annotation",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Any",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Appinfo",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.AttrDecls",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.AttributeType",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ComplexContent",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ComplexExtension",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ComplexRestriction",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ComplexType",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ComplexTypeHost",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ComplexTypeModel",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ContentModelContainer",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Documentation",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Element",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ExplicitGroup",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.ExtensionType",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.FixedOrDefault",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Import",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.List",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.LocalAttribute",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.LocalElement",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.NestedParticle",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.NoFixedFacet",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Occurs",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Particle",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Redefinable",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Schema",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SchemaTop",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleContent",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleDerivation",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleExtension",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleRestriction",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleRestrictionModel",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleType",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.SimpleTypeHost",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.TopLevelAttribute",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.TopLevelElement",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.TypeDefParticle",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.TypeHost",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Union",
"org.glassfish.jaxb.runtime.v2.schemagen.xmlschema.Wildcard",
"com.sun.xml.txw2.TypedXmlWriter");
@BuildStep
void processAnnotationsAndIndexFiles(
BuildProducer<NativeImageSystemPropertyBuildItem> nativeImageProps,
BuildProducer<ServiceProviderBuildItem> providerItem,
BuildProducer<NativeImageProxyDefinitionBuildItem> proxyDefinitions,
CombinedIndexBuildItem combinedIndexBuildItem,
List<JaxbFileRootBuildItem> fileRoots,
BuildProducer<ReflectiveHierarchyBuildItem> reflectiveHierarchies,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<NativeImageResourceBuildItem> resource,
BuildProducer<NativeImageResourceBundleBuildItem> resourceBundle,
BuildProducer<RuntimeInitializedClassBuildItem> runtimeClasses,
BuildProducer<JaxbClassesToBeBoundBuildItem> classesToBeBoundProducer,
ApplicationArchivesBuildItem applicationArchivesBuildItem) throws ClassNotFoundException {
List<String> classesToBeBound = new ArrayList<>();
IndexView index = combinedIndexBuildItem.getIndex();
// Register classes for reflection based on JAXB annotations
boolean jaxbRootAnnotationsDetected = false;
for (DotName jaxbRootAnnotation : JAXB_ROOT_ANNOTATIONS) {
for (AnnotationInstance jaxbRootAnnotationInstance : index
.getAnnotations(jaxbRootAnnotation)) {
if (jaxbRootAnnotationInstance.target().kind() == Kind.CLASS
&& !JAXB_ANNOTATIONS.contains(jaxbRootAnnotationInstance.target().asClass().getClass())) {
ClassInfo targetClassInfo = jaxbRootAnnotationInstance.target().asClass();
final var name = targetClassInfo.name();
reflectiveHierarchies.produce(ReflectiveHierarchyBuildItem
.builder(name)
.index(index)
.ignoreTypePredicate(t -> ReflectiveHierarchyBuildItem.DefaultIgnoreTypePredicate.INSTANCE.test(t)
|| IGNORE_TYPES.contains(t))
.ignoreFieldPredicate(JaxbProcessor::isFieldIgnored)
.ignoreMethodPredicate(JaxbProcessor::isMethodIgnored)
.source(getClass().getSimpleName() + " annotated with @" + jaxbRootAnnotation + " > " + name)
.build());
classesToBeBound.add(targetClassInfo.name().toString());
jaxbRootAnnotationsDetected = true;
}
}
}
if (!jaxbRootAnnotationsDetected && fileRoots.isEmpty()) {
return;
}
// Register package-infos for reflection
for (AnnotationInstance xmlSchemaInstance : index.getAnnotations(XML_SCHEMA)) {
if (xmlSchemaInstance.target().kind() == Kind.CLASS) {
String className = xmlSchemaInstance.target().asClass().name().toString();
reflectiveClass.produce(ReflectiveClassBuildItem.builder(className)
.reason(getClass().getName() + " annotated with @" + XML_SCHEMA)
.build());
}
}
// Register XML Java type adapters for reflection
for (AnnotationInstance xmlJavaTypeAdapterInstance : index.getAnnotations(XML_JAVA_TYPE_ADAPTER)) {
reflectiveClass.produce(
ReflectiveClassBuildItem.builder(xmlJavaTypeAdapterInstance.value().asClass().name().toString())
.reason(getClass().getName() + " @" + XML_JAVA_TYPE_ADAPTER + " value")
.methods().fields().build());
}
if (!index.getAnnotations(XML_ANY_ELEMENT).isEmpty()) {
reflectiveClass.produce(ReflectiveClassBuildItem.builder("jakarta.xml.bind.annotation.W3CDomHandler")
.reason(getClass().getName() + " @" + XML_ANY_ELEMENT + " annotation present")
.build());
}
JAXB_ANNOTATIONS.stream()
.map(Class::getName)
.forEach(className -> {
reflectiveClass.produce(ReflectiveClassBuildItem.builder(className)
.reason(getClass().getName() + " JAXB annotation")
.methods().build());
});
// Register @XmlSeeAlso
proxyDefinitions.produce(new NativeImageProxyDefinitionBuildItem(XmlSeeAlso.class.getName(),
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"));
for (AnnotationInstance xmlSeeAlsoAnn : index.getAnnotations(XML_SEE_ALSO)) {
AnnotationValue value = xmlSeeAlsoAnn.value();
Type[] types = value.asClassArray();
for (Type t : types) {
reflectiveClass.produce(ReflectiveClassBuildItem.builder(t.name().toString())
.reason(getClass().getName() + " @" + XML_SEE_ALSO + " value")
.build());
}
}
// Register Native proxy definitions
for (String s : NATIVE_PROXY_DEFINITIONS) {
proxyDefinitions.produce(new NativeImageProxyDefinitionBuildItem(s));
}
for (JaxbFileRootBuildItem i : fileRoots) {
iterateResources(applicationArchivesBuildItem, i.getFileRoot(), resource, reflectiveClass, classesToBeBound);
}
classesToBeBoundProducer.produce(new JaxbClassesToBeBoundBuildItem(classesToBeBound));
}
@BuildStep
void ignoreWarnings(BuildProducer<ReflectiveHierarchyIgnoreWarningBuildItem> ignoreWarningProducer) {
for (DotName type : IGNORE_TYPES) {
ignoreWarningProducer.produce(new ReflectiveHierarchyIgnoreWarningBuildItem(type));
}
}
@BuildStep
void registerClasses(
BuildProducer<NativeImageSystemPropertyBuildItem> nativeImageProps,
BuildProducer<ServiceProviderBuildItem> providerItem,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<NativeImageResourceBundleBuildItem> resourceBundle) {
reflectiveClass.produce(ReflectiveClassBuildItem.builder(
"org.glassfish.jaxb.runtime.v2.ContextFactory",
"com.sun.xml.internal.stream.XMLInputFactoryImpl",
"com.sun.xml.internal.stream.XMLOutputFactoryImpl",
"com.sun.org.apache.xpath.internal.functions.FuncNot",
"com.sun.org.apache.xerces.internal.impl.dv.xs.SchemaDVFactoryImpl")
.reason(getClass().getName())
.methods().build());
addResourceBundle(resourceBundle, "jakarta.xml.bind.Messages");
addResourceBundle(resourceBundle, "jakarta.xml.bind.helpers.Messages");
nativeImageProps
.produce(new NativeImageSystemPropertyBuildItem("com.sun.xml.bind.v2.bytecode.ClassTailor.noOptimize", "true"));
JAXB_REFLECTIVE_CLASSES.stream()
.map(Class::getName)
.forEach(className -> reflectiveClass.produce(ReflectiveClassBuildItem.builder(className)
.reason(getClass().getName() + " JAXB reflective class")
.methods().build()));
providerItem
.produce(new ServiceProviderBuildItem(JAXBContext.class.getName(),
"org.glassfish.jaxb.runtime.v2.ContextFactory"));
}
@BuildStep
FilteredJaxbClassesToBeBoundBuildItem filterBoundClasses(
JaxbConfig config,
List<JaxbClassesToBeBoundBuildItem> classesToBeBoundBuildItems) {
FilteredJaxbClassesToBeBoundBuildItem.Builder builder = FilteredJaxbClassesToBeBoundBuildItem.builder();
classesToBeBoundBuildItems.stream()
.map(JaxbClassesToBeBoundBuildItem::getClasses)
.forEach(builder::classNames);
// remove classes that have been excluded by users
if (config.excludeClasses().isPresent()) {
builder.classNameExcludes(config.excludeClasses().get());
}
return builder.build();
}
@BuildStep
@Record(ExecutionTime.STATIC_INIT)
void bindClassesToJaxbContext(
JaxbConfig config,
FilteredJaxbClassesToBeBoundBuildItem filteredClassesToBeBound,
SynthesisFinishedBuildItem beanContainerState,
JaxbContextConfigRecorder jaxbContextConfig /* Force the build time container to invoke this method */) {
jaxbContextConfig.reset();
final BeanResolver beanResolver = beanContainerState.getBeanResolver();
final Set<BeanInfo> beans = beanResolver
.resolveBeans(Type.create(DotName.createSimple(JAXBContext.class), org.jboss.jandex.Type.Kind.CLASS));
if (!beans.isEmpty()) {
jaxbContextConfig.addClassesToBeBound(filteredClassesToBeBound.getClasses());
if (config.validateJaxbContext()) {
validateJaxbContext(filteredClassesToBeBound, beanResolver, beans);
}
}
}
@BuildStep
void registerProduces(BuildProducer<AdditionalBeanBuildItem> additionalBeans) {
additionalBeans.produce(new AdditionalBeanBuildItem(JaxbContextProducer.class));
}
private void validateJaxbContext(FilteredJaxbClassesToBeBoundBuildItem filteredClassesToBeBound, BeanResolver beanResolver,
Set<BeanInfo> beans) {
final BeanInfo bean = beanResolver.resolveAmbiguity(beans);
if (bean.isDefaultBean()) {
/*
* Validate the default JAXB context at build time and fail early.
* Do this only if the user application actually requires the default JAXBContext bean
*/
try {
JAXBContext.newInstance(filteredClassesToBeBound.getClasses().toArray(new Class[0]));
} catch (JAXBException e) {
/*
* Producing a ValidationErrorBuildItem would perhaps be more natural here,
* but doing so causes a cycle between this and reactive JAXB extension
* Throwing from here works well too
*/
throw new DeploymentException("Failed to create or validate the default JAXBContext", e);
}
}
}
private void handleJaxbFile(Path p, BuildProducer<NativeImageResourceBuildItem> resource,
BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
List<String> classesToBeBound) {
try {
String path = p.toAbsolutePath().toString().substring(1);
String pkg = p.toAbsolutePath().getParent().toString().substring(1)
.replace(p.getFileSystem().getSeparator(), ".") + ".";
resource.produce(new NativeImageResourceBuildItem(path));
ArrayList<Class> classes = new ArrayList<>();
for (String line : Files.readAllLines(p)) {
line = line.trim();
if (!line.isEmpty() && !line.startsWith("#")) {
String clazz = pkg + line;
Class<?> cl = Class.forName(clazz, false, Thread.currentThread().getContextClassLoader());
classesToBeBound.add(clazz);
while (cl != Object.class) {
classes.add(cl);
cl = cl.getSuperclass();
}
}
}
reflectiveClass.produce(ReflectiveClassBuildItem.builder(classes.toArray(new Class[0]))
.reason(getClass().getName() + " jaxb.index file " + path)
.methods().fields().build());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void iterateResources(ApplicationArchivesBuildItem applicationArchivesBuildItem, String path,
BuildProducer<NativeImageResourceBuildItem> resource, BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
List<String> classesToBeBound) {
for (ApplicationArchive archive : applicationArchivesBuildItem.getAllApplicationArchives()) {
archive.accept(tree -> {
var arch = tree.getPath(path);
if (arch != null && Files.isDirectory(arch)) {
JaxbProcessor.safeWalk(arch)
.filter(Files::isRegularFile)
.filter(p -> p.getFileName().toString().equals("jaxb.index"))
.forEach(p1 -> handleJaxbFile(p1, resource, reflectiveClass, classesToBeBound));
}
});
}
}
public static Stream<Path> safeWalk(Path p) {
try {
return Files.walk(p);
} catch (IOException e) {
throw new IOError(e);
}
}
private void addResourceBundle(BuildProducer<NativeImageResourceBundleBuildItem> resourceBundle, String bundle) {
resourceBundle.produce(new NativeImageResourceBundleBuildItem(bundle));
}
private static boolean isFieldIgnored(FieldInfo fieldInfo) {
// see JakartaXmlBindingAnnotationIntrospector#isVisible(AnnotatedField f)
// and XmlAccessType
if (fieldInfo.hasAnnotation(XML_TRANSIENT)) {
return true;
}
if (Modifier.isStatic(fieldInfo.flags())) {
return true;
}
for (Class<? extends Annotation> jaxbAnnotation : JAXB_ANNOTATIONS) {
if (fieldInfo.hasAnnotation(jaxbAnnotation)) {
return true;
}
}
ClassInfo declaringClass = fieldInfo.declaringClass();
XmlAccessType xmlAccessType = getXmlAccessType(declaringClass);
switch (xmlAccessType) {
case FIELD:
return false;
case PROPERTY:
return true;
case PUBLIC_MEMBER:
return !Modifier.isPublic(fieldInfo.flags());
case NONE:
return true;
default:
return true;
}
}
private static boolean isMethodIgnored(MethodInfo methodInfo) {
// see JakartaXmlBindingAnnotationIntrospector#isVisible(AnnotatedMethod m)
// and XmlAccessType
MethodInfo getterSetterCounterpart = getGetterSetterCounterPart(methodInfo);
if (methodInfo.hasAnnotation(XML_TRANSIENT) ||
(getterSetterCounterpart != null && getterSetterCounterpart.hasAnnotation(XML_TRANSIENT))) {
return true;
}
if (Modifier.isStatic(methodInfo.flags())) {
return true;
}
// if method has a JAXB annotation, we consider it
for (Class<? extends Annotation> jaxbAnnotation : JAXB_ANNOTATIONS) {
if (methodInfo.hasAnnotation(jaxbAnnotation)) {
return false;
}
}
ClassInfo declaringClass = methodInfo.declaringClass();
XmlAccessType xmlAccessType = getXmlAccessType(declaringClass);
switch (xmlAccessType) {
case FIELD:
return true;
case PROPERTY:
case PUBLIC_MEMBER:
return !Modifier.isPublic(methodInfo.flags());
case NONE:
return true;
default:
return true;
}
}
private static MethodInfo getGetterSetterCounterPart(MethodInfo methodInfo) {
if (!methodInfo.name().startsWith("get") || methodInfo.parametersCount() > 0) {
return null;
}
return methodInfo.declaringClass().method(methodInfo.name().replaceFirst("get", "set"), methodInfo.returnType());
}
private static XmlAccessType getXmlAccessType(ClassInfo classInfo) {
AnnotationInstance xmlAccessorTypeAi = classInfo.annotation(XML_ACCESSOR_TYPE);
if (xmlAccessorTypeAi == null) {
return XmlAccessType.PUBLIC_MEMBER;
}
AnnotationValue xmlAccessorType = xmlAccessorTypeAi.value();
if (xmlAccessorType == null) {
return XmlAccessType.PUBLIC_MEMBER;
}
return XmlAccessType.valueOf(xmlAccessorTypeAi.value().asEnum());
}
@BuildStep(onlyIf = NativeOrNativeSourcesBuild.class)
void jaxbIndex(final BuildProducer<NativeImageResourcePatternsBuildItem> resource) {
LOG.debug("adding jaxb.index to native image resources");
resource.produce(NativeImageResourcePatternsBuildItem.builder().includeGlob("**/jaxb.index").build());
}
}
| JaxbProcessor |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointSettingsSerializableTest.java | {
"start": 6750,
"end": 7530
} | class ____ implements CheckpointStorage {
private static final long serialVersionUID = -6107964383429395816L;
/** Simulate a custom option that is not in the normal classpath. */
@SuppressWarnings("unused")
private Serializable customOption;
public CustomCheckpointStorage(Serializable customOption) {
this.customOption = customOption;
}
@Override
public CompletedCheckpointStorageLocation resolveCheckpoint(String pointer) {
throw new UnsupportedOperationException();
}
@Override
public CheckpointStorageAccess createCheckpointStorage(JobID jobId) throws IOException {
return mock(CheckpointStorageAccess.class);
}
}
}
| CustomCheckpointStorage |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/JoinWithSolutionSetSecondDriver.java | {
"start": 1726,
"end": 3359
} | class ____<IT1, IT2, OT>
implements ResettableDriver<FlatJoinFunction<IT1, IT2, OT>, OT> {
private TaskContext<FlatJoinFunction<IT1, IT2, OT>, OT> taskContext;
private CompactingHashTable<IT2> hashTable;
private JoinHashMap<IT2> objectMap;
private TypeComparator<IT1> probeSideComparator;
private TypePairComparator<IT1, IT2> pairComparator;
private IT2 solutionSideRecord;
private IT1 probeSideRecord;
protected volatile boolean running;
private boolean objectReuseEnabled = false;
// --------------------------------------------------------------------------------------------
@Override
public void setup(TaskContext<FlatJoinFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
}
@Override
public int getNumberOfInputs() {
return 1;
}
@Override
public Class<FlatJoinFunction<IT1, IT2, OT>> getStubType() {
@SuppressWarnings("unchecked")
final Class<FlatJoinFunction<IT1, IT2, OT>> clazz =
(Class<FlatJoinFunction<IT1, IT2, OT>>) (Class<?>) FlatJoinFunction.class;
return clazz;
}
@Override
public int getNumberOfDriverComparators() {
return 1;
}
@Override
public boolean isInputResettable(int inputNum) {
if (inputNum < 0 || inputNum > 1) {
throw new IndexOutOfBoundsException();
}
// from the perspective of the task that runs this operator, there is only one input, which
// is not resettable
// we implement the resettable | JoinWithSolutionSetSecondDriver |
java | grpc__grpc-java | okhttp/third_party/okhttp/main/java/io/grpc/okhttp/internal/Headers.java | {
"start": 1325,
"end": 3039
} | class ____ {
private final String[] namesAndValues;
private Headers(Builder builder) {
this.namesAndValues = builder.namesAndValues.toArray(new String[builder.namesAndValues.size()]);
}
/** Returns the last value corresponding to the specified field, or null. */
public String get(String name) {
return get(namesAndValues, name);
}
/** Returns the number of field values. */
public int size() {
return namesAndValues.length / 2;
}
/** Returns the field at {@code position} or null if that is out of range. */
public String name(int index) {
int nameIndex = index * 2;
if (nameIndex < 0 || nameIndex >= namesAndValues.length) {
return null;
}
return namesAndValues[nameIndex];
}
/** Returns the value at {@code index} or null if that is out of range. */
public String value(int index) {
int valueIndex = index * 2 + 1;
if (valueIndex < 0 || valueIndex >= namesAndValues.length) {
return null;
}
return namesAndValues[valueIndex];
}
public Builder newBuilder() {
Builder result = new Builder();
Collections.addAll(result.namesAndValues, namesAndValues);
return result;
}
@Override public String toString() {
StringBuilder result = new StringBuilder();
for (int i = 0, size = size(); i < size; i++) {
result.append(name(i)).append(": ").append(value(i)).append("\n");
}
return result.toString();
}
private static String get(String[] namesAndValues, String name) {
for (int i = namesAndValues.length - 2; i >= 0; i -= 2) {
if (name.equalsIgnoreCase(namesAndValues[i])) {
return namesAndValues[i + 1];
}
}
return null;
}
public static final | Headers |
java | grpc__grpc-java | contextstorage/src/main/java/io/grpc/override/ContextStorageOverride.java | {
"start": 675,
"end": 1050
} | class ____ your dependencies will override the default gRPC context storage using
* reflection. It is a bridge between {@link io.grpc.Context} and
* {@link io.opentelemetry.context.Context}, i.e. propagating io.grpc.context.Context also
* propagates io.opentelemetry.context, and propagating io.opentelemetry.context will also propagate
* io.grpc.context.
*/
public final | in |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/server/csrf/CsrfWebFilterTests.java | {
"start": 2371,
"end": 16696
} | class ____ {
@Mock
private WebFilterChain chain;
@Mock
private ServerCsrfTokenRepository repository;
private CsrfToken token = new DefaultCsrfToken("csrf", "CSRF", "a");
private CsrfWebFilter csrfFilter = new CsrfWebFilter();
private MockServerWebExchange get = MockServerWebExchange.from(MockServerHttpRequest.get("/"));
private MockServerWebExchange post = MockServerWebExchange.from(MockServerHttpRequest.post("/"));
@Test
public void setRequestHandlerWhenNullThenThrowsIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.csrfFilter.setRequestHandler(null))
.withMessage("requestHandler cannot be null");
// @formatter:on
}
@Test
public void filterWhenGetThenSessionNotCreatedAndChainContinues() {
PublisherProbe<Void> chainResult = PublisherProbe.empty();
given(this.chain.filter(this.get)).willReturn(chainResult.mono());
Mono<Void> result = this.csrfFilter.filter(this.get, this.chain);
StepVerifier.create(result).verifyComplete();
Mono<Boolean> isSessionStarted = this.get.getSession().map(WebSession::isStarted);
StepVerifier.create(isSessionStarted).expectNext(false).verifyComplete();
chainResult.assertWasSubscribed();
}
@Test
public void filterWhenPostAndNoTokenThenCsrfException() {
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
assertThat(this.post.getResponse().getStatusCode()).isEqualTo(HttpStatus.FORBIDDEN);
}
@Test
public void filterWhenPostAndEstablishedCsrfTokenAndRequestMissingTokenThenCsrfException() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
assertThat(this.post.getResponse().getStatusCode()).isEqualTo(HttpStatus.FORBIDDEN);
StepVerifier.create(this.post.getResponse().getBodyAsString())
.assertNext((body) -> assertThat(body).contains("An expected CSRF token cannot be found"));
}
@Test
public void filterWhenPostAndEstablishedCsrfTokenAndRequestParamInvalidTokenThenCsrfException() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
this.post = MockServerWebExchange.from(MockServerHttpRequest.post("/")
.body(this.token.getParameterName() + "=" + this.token.getToken() + "INVALID"));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
assertThat(this.post.getResponse().getStatusCode()).isEqualTo(HttpStatus.FORBIDDEN);
}
@Test
public void filterWhenPostAndEstablishedCsrfTokenAndRequestParamValidTokenThenContinues() {
PublisherProbe<Void> chainResult = PublisherProbe.empty();
given(this.chain.filter(any())).willReturn(chainResult.mono());
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
CsrfToken csrfToken = createXorCsrfToken();
this.post = MockServerWebExchange.from(MockServerHttpRequest.post("/")
.contentType(MediaType.APPLICATION_FORM_URLENCODED)
.body(csrfToken.getParameterName() + "=" + csrfToken.getToken()));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
chainResult.assertWasSubscribed();
}
@Test
public void filterWhenPostAndEstablishedCsrfTokenAndHeaderInvalidTokenThenCsrfException() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
this.post = MockServerWebExchange.from(
MockServerHttpRequest.post("/").header(this.token.getHeaderName(), this.token.getToken() + "INVALID"));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
assertThat(this.post.getResponse().getStatusCode()).isEqualTo(HttpStatus.FORBIDDEN);
}
@Test
public void filterWhenPostAndEstablishedCsrfTokenAndHeaderValidTokenThenContinues() {
PublisherProbe<Void> chainResult = PublisherProbe.empty();
given(this.chain.filter(any())).willReturn(chainResult.mono());
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
CsrfToken csrfToken = createXorCsrfToken();
this.post = MockServerWebExchange
.from(MockServerHttpRequest.post("/").header(csrfToken.getHeaderName(), csrfToken.getToken()));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
chainResult.assertWasSubscribed();
}
@Test
public void filterWhenRequestHandlerSetThenUsed() {
ServerCsrfTokenRequestHandler requestHandler = mock(ServerCsrfTokenRequestHandler.class);
given(requestHandler.resolveCsrfTokenValue(any(ServerWebExchange.class), any(CsrfToken.class)))
.willReturn(Mono.just(this.token.getToken()));
this.csrfFilter.setRequestHandler(requestHandler);
PublisherProbe<Void> chainResult = PublisherProbe.empty();
given(this.chain.filter(any())).willReturn(chainResult.mono());
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
this.post = MockServerWebExchange
.from(MockServerHttpRequest.post("/").header(this.token.getHeaderName(), this.token.getToken()));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
chainResult.assertWasSubscribed();
verify(requestHandler).handle(eq(this.post), any());
verify(requestHandler).resolveCsrfTokenValue(this.post, this.token);
}
@Test
public void filterWhenXorServerCsrfTokenRequestAttributeHandlerAndValidTokenThenSuccess() {
PublisherProbe<Void> chainResult = PublisherProbe.empty();
given(this.chain.filter(any())).willReturn(chainResult.mono());
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
CsrfToken csrfToken = createXorCsrfToken();
this.post = MockServerWebExchange
.from(MockServerHttpRequest.post("/").header(csrfToken.getHeaderName(), csrfToken.getToken()));
StepVerifier.create(this.csrfFilter.filter(this.post, this.chain)).verifyComplete();
chainResult.assertWasSubscribed();
}
@Test
public void filterWhenXorServerCsrfTokenRequestAttributeHandlerAndRawTokenThenAccessDeniedException() {
PublisherProbe<Void> chainResult = PublisherProbe.empty();
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
XorServerCsrfTokenRequestAttributeHandler requestHandler = new XorServerCsrfTokenRequestAttributeHandler();
this.csrfFilter.setRequestHandler(requestHandler);
this.post = MockServerWebExchange
.from(MockServerHttpRequest.post("/").header(this.token.getHeaderName(), this.token.getToken()));
Mono<Void> result = this.csrfFilter.filter(this.post, this.chain);
StepVerifier.create(result).verifyComplete();
chainResult.assertWasNotSubscribed();
assertThat(this.post.getResponse().getStatusCode()).isEqualTo(HttpStatus.FORBIDDEN);
}
@Test
// gh-8452
public void matchesRequireCsrfProtectionWhenNonStandardHTTPMethodIsUsed() {
MockServerWebExchange nonStandardHttpExchange = MockServerWebExchange
.from(MockServerHttpRequest.method(HttpMethod.valueOf("non-standard-http-method"), "/"));
ServerWebExchangeMatcher serverWebExchangeMatcher = CsrfWebFilter.DEFAULT_CSRF_MATCHER;
assertThat(serverWebExchangeMatcher.matches(nonStandardHttpExchange).map(MatchResult::isMatch).block())
.isTrue();
}
@Test
public void doFilterWhenSkipExchangeInvokedThenSkips() {
PublisherProbe<Void> chainResult = PublisherProbe.empty();
given(this.chain.filter(any())).willReturn(chainResult.mono());
ServerWebExchangeMatcher matcher = mock(ServerWebExchangeMatcher.class);
this.csrfFilter.setRequireCsrfProtectionMatcher(matcher);
MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.post("/post").build());
CsrfWebFilter.skipExchange(exchange);
this.csrfFilter.filter(exchange, this.chain).block();
verifyNoMoreInteractions(matcher);
}
@Test
public void filterWhenMultipartFormDataAndNotEnabledThenDenied() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
WebTestClient client = WebTestClient.bindToController(new OkController()).webFilter(this.csrfFilter).build();
client.post()
.uri("/")
.contentType(MediaType.MULTIPART_FORM_DATA)
.body(BodyInserters.fromMultipartData(this.token.getParameterName(), this.token.getToken()))
.exchange()
.expectStatus()
.isForbidden();
}
@Test
public void filterWhenMultipartFormDataAndEnabledThenGranted() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
ServerCsrfTokenRequestAttributeHandler requestHandler = new ServerCsrfTokenRequestAttributeHandler();
requestHandler.setTokenFromMultipartDataEnabled(true);
this.csrfFilter.setRequestHandler(requestHandler);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
WebTestClient client = WebTestClient.bindToController(new OkController()).webFilter(this.csrfFilter).build();
client.post()
.uri("/")
.contentType(MediaType.MULTIPART_FORM_DATA)
.body(BodyInserters.fromMultipartData(this.token.getParameterName(), this.token.getToken()))
.exchange()
.expectStatus()
.is2xxSuccessful();
}
@Test
public void filterWhenPostAndMultipartFormDataEnabledAndNoBodyProvided() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
ServerCsrfTokenRequestAttributeHandler requestHandler = new ServerCsrfTokenRequestAttributeHandler();
requestHandler.setTokenFromMultipartDataEnabled(true);
this.csrfFilter.setRequestHandler(requestHandler);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
WebTestClient client = WebTestClient.bindToController(new OkController()).webFilter(this.csrfFilter).build();
client.post()
.uri("/")
.header(this.token.getHeaderName(), this.token.getToken())
.exchange()
.expectStatus()
.is2xxSuccessful();
}
@Test
public void filterWhenFormDataAndEnabledThenGranted() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
ServerCsrfTokenRequestAttributeHandler requestHandler = new ServerCsrfTokenRequestAttributeHandler();
requestHandler.setTokenFromMultipartDataEnabled(true);
this.csrfFilter.setRequestHandler(requestHandler);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
given(this.repository.generateToken(any())).willReturn(Mono.just(this.token));
WebTestClient client = WebTestClient.bindToController(new OkController()).webFilter(this.csrfFilter).build();
client.post()
.uri("/")
.contentType(MediaType.APPLICATION_FORM_URLENCODED)
.bodyValue(this.token.getParameterName() + "=" + this.token.getToken())
.exchange()
.expectStatus()
.is2xxSuccessful();
}
@Test
public void filterWhenMultipartMixedAndEnabledThenNotRead() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
ServerCsrfTokenRequestAttributeHandler requestHandler = new ServerCsrfTokenRequestAttributeHandler();
requestHandler.setTokenFromMultipartDataEnabled(true);
this.csrfFilter.setRequestHandler(requestHandler);
given(this.repository.loadToken(any())).willReturn(Mono.just(this.token));
WebTestClient client = WebTestClient.bindToController(new OkController()).webFilter(this.csrfFilter).build();
client.post()
.uri("/")
.contentType(MediaType.MULTIPART_MIXED)
.bodyValue(this.token.getParameterName() + "=" + this.token.getToken())
.exchange()
.expectStatus()
.isForbidden();
}
// gh-9561
@Test
public void doFilterWhenTokenIsNullThenNoNullPointer() {
this.csrfFilter.setCsrfTokenRepository(this.repository);
CsrfToken token = mock(CsrfToken.class);
given(token.getToken()).willReturn(null);
given(token.getHeaderName()).willReturn(this.token.getHeaderName());
given(token.getParameterName()).willReturn(this.token.getParameterName());
given(this.repository.loadToken(any())).willReturn(Mono.just(token));
WebTestClient client = WebTestClient.bindToController(new OkController()).webFilter(this.csrfFilter).build();
client.post()
.uri("/")
.contentType(MediaType.APPLICATION_FORM_URLENCODED)
.bodyValue(this.token.getParameterName() + "=" + this.token.getToken())
.exchange()
.expectStatus()
.isForbidden();
}
// gh-9113
@Test
public void filterWhenSubscribingCsrfTokenMultipleTimesThenGenerateOnlyOnce() {
PublisherProbe<CsrfToken> chainResult = PublisherProbe.empty();
this.csrfFilter.setCsrfTokenRepository(this.repository);
given(this.repository.loadToken(any())).willReturn(Mono.empty());
given(this.repository.generateToken(any())).willReturn(chainResult.mono());
given(this.chain.filter(any())).willReturn(Mono.empty());
this.csrfFilter.filter(this.get, this.chain).block();
Mono<CsrfToken> result = this.get.getAttribute(CsrfToken.class.getName());
result.block();
result.block();
assertThat(chainResult.subscribeCount()).isEqualTo(1);
}
private CsrfToken createXorCsrfToken() {
ServerCsrfTokenRequestHandler handler = new XorServerCsrfTokenRequestAttributeHandler();
MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));
handler.handle(exchange, Mono.just(this.token));
Mono<CsrfToken> csrfToken = exchange.getAttribute(CsrfToken.class.getName());
return csrfToken.block();
}
@RestController
static | CsrfWebFilterTests |
java | apache__rocketmq | common/src/main/java/org/apache/rocketmq/common/utils/IPAddressUtils.java | {
"start": 1023,
"end": 3848
} | class ____ {
private static final String SLASH = "/";
private static final InetAddressValidator VALIDATOR = InetAddressValidator.getInstance();
public static boolean isValidIPOrCidr(String ipOrCidr) {
return isValidIp(ipOrCidr) || isValidCidr(ipOrCidr);
}
public static boolean isValidIp(String ip) {
return VALIDATOR.isValid(ip);
}
public static boolean isValidIPv4(String ip) {
return VALIDATOR.isValidInet4Address(ip);
}
public static boolean isValidIPv6(String ip) {
return VALIDATOR.isValidInet6Address(ip);
}
public static boolean isValidCidr(String cidr) {
return isValidIPv4Cidr(cidr) || isValidIPv6Cidr(cidr);
}
public static boolean isValidIPv4Cidr(String cidr) {
try {
String[] parts = cidr.split(SLASH);
if (parts.length != 2) {
return false;
}
InetAddress ip = InetAddress.getByName(parts[0]);
if (ip.getAddress().length != 4) {
return false;
}
int prefix = Integer.parseInt(parts[1]);
return prefix >= 0 && prefix <= 32;
} catch (Exception e) {
return false;
}
}
public static boolean isValidIPv6Cidr(String cidr) {
try {
String[] parts = cidr.split(SLASH);
if (parts.length != 2) {
return false;
}
InetAddress ip = InetAddress.getByName(parts[0]);
if (ip.getAddress().length != 16) {
return false;
}
int prefix = Integer.parseInt(parts[1]);
return prefix >= 0 && prefix <= 128;
} catch (Exception e) {
return false;
}
}
public static boolean isIPInRange(String ip, String cidr) {
try {
String[] parts = cidr.split(SLASH);
if (parts.length == 1) {
return StringUtils.equals(ip, cidr);
}
if (parts.length != 2) {
return false;
}
InetAddress cidrIp = InetAddress.getByName(parts[0]);
int prefixLength = Integer.parseInt(parts[1]);
BigInteger cidrIpBigInt = new BigInteger(1, cidrIp.getAddress());
BigInteger ipBigInt = new BigInteger(1, InetAddress.getByName(ip).getAddress());
BigInteger mask = BigInteger.valueOf(-1).shiftLeft(cidrIp.getAddress().length * 8 - prefixLength);
BigInteger cidrIpLower = cidrIpBigInt.and(mask);
BigInteger cidrIpUpper = cidrIpLower.add(mask.not());
return ipBigInt.compareTo(cidrIpLower) >= 0 && ipBigInt.compareTo(cidrIpUpper) <= 0;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
| IPAddressUtils |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/util/NativeTypes.java | {
"start": 12520,
"end": 18152
} | class ____ implements LiteralAnalyzer {
@Override
public void validate(String s) {
NumberRepresentation br = new NumberRepresentation( s, true, false, false ) {
@Override
void parse(String val, int radix) {
Short.parseShort( val, radix );
}
};
br.validate();
}
@Override
public Class<?> getLiteral() {
return int.class;
}
}
private NativeTypes() {
}
static {
Map<Class<?>, Class<?>> tmp = new HashMap<>();
tmp.put( Byte.class, byte.class );
tmp.put( Short.class, short.class );
tmp.put( Integer.class, int.class );
tmp.put( Long.class, long.class );
tmp.put( Float.class, float.class );
tmp.put( Double.class, double.class );
tmp.put( Boolean.class, boolean.class );
tmp.put( Character.class, char.class );
WRAPPER_TO_PRIMITIVE_TYPES = Collections.unmodifiableMap( tmp );
tmp = new HashMap<>();
tmp.put( byte.class, Byte.class );
tmp.put( short.class, Short.class );
tmp.put( int.class, Integer.class );
tmp.put( long.class, Long.class );
tmp.put( float.class, Float.class );
tmp.put( double.class, Double.class );
tmp.put( boolean.class, Boolean.class );
tmp.put( char.class, Character.class );
PRIMITIVE_TO_WRAPPER_TYPES = Collections.unmodifiableMap( tmp );
NUMBER_TYPES.add( byte.class );
NUMBER_TYPES.add( short.class );
NUMBER_TYPES.add( int.class );
NUMBER_TYPES.add( long.class );
NUMBER_TYPES.add( float.class );
NUMBER_TYPES.add( double.class );
NUMBER_TYPES.add( Byte.class );
NUMBER_TYPES.add( Short.class );
NUMBER_TYPES.add( Integer.class );
NUMBER_TYPES.add( Long.class );
NUMBER_TYPES.add( Float.class );
NUMBER_TYPES.add( Double.class );
NUMBER_TYPES.add( BigInteger.class );
NUMBER_TYPES.add( BigDecimal.class );
Map<String, LiteralAnalyzer> tmp2 = new HashMap<>();
tmp2.put( boolean.class.getCanonicalName(), new BooleanAnalyzer() );
tmp2.put( Boolean.class.getCanonicalName(), new BooleanAnalyzer() );
tmp2.put( char.class.getCanonicalName(), new CharAnalyzer() );
tmp2.put( Character.class.getCanonicalName(), new CharAnalyzer() );
tmp2.put( byte.class.getCanonicalName(), new ByteAnalyzer() );
tmp2.put( Byte.class.getCanonicalName(), new ByteAnalyzer() );
tmp2.put( double.class.getCanonicalName(), new DoubleAnalyzer() );
tmp2.put( Double.class.getCanonicalName(), new DoubleAnalyzer() );
tmp2.put( float.class.getCanonicalName(), new FloatAnalyzer() );
tmp2.put( Float.class.getCanonicalName(), new FloatAnalyzer() );
tmp2.put( int.class.getCanonicalName(), new IntAnalyzer() );
tmp2.put( Integer.class.getCanonicalName(), new IntAnalyzer() );
tmp2.put( long.class.getCanonicalName(), new LongAnalyzer() );
tmp2.put( Long.class.getCanonicalName(), new LongAnalyzer() );
tmp2.put( short.class.getCanonicalName(), new ShortAnalyzer() );
tmp2.put( Short.class.getCanonicalName(), new ShortAnalyzer() );
ANALYZERS = Collections.unmodifiableMap( tmp2 );
TYPE_KIND_NAME.put( TypeKind.BOOLEAN, "boolean" );
TYPE_KIND_NAME.put( TypeKind.BYTE, "byte" );
TYPE_KIND_NAME.put( TypeKind.SHORT, "short" );
TYPE_KIND_NAME.put( TypeKind.INT, "int" );
TYPE_KIND_NAME.put( TypeKind.LONG, "long" );
TYPE_KIND_NAME.put( TypeKind.CHAR, "char" );
TYPE_KIND_NAME.put( TypeKind.FLOAT, "float" );
TYPE_KIND_NAME.put( TypeKind.DOUBLE, "double" );
Map<String, Integer> tmp3 = new HashMap<>( );
tmp3.put( byte.class.getName(), 1 );
tmp3.put( Byte.class.getName(), 1 );
tmp3.put( short.class.getName(), 2 );
tmp3.put( Short.class.getName(), 2 );
tmp3.put( int.class.getName(), 3 );
tmp3.put( Integer.class.getName(), 3 );
tmp3.put( long.class.getName(), 4 );
tmp3.put( Long.class.getName(), 4 );
tmp3.put( float.class.getName(), 5 );
tmp3.put( Float.class.getName(), 5 );
tmp3.put( double.class.getName(), 6 );
tmp3.put( Double.class.getName(), 6 );
tmp3.put( BigInteger.class.getName(), 50 );
tmp3.put( BigDecimal.class.getName(), 51 );
tmp3.put( String.class.getName(), 51 );
NARROWING_LUT = Collections.unmodifiableMap( tmp3 );
}
public static Class<?> getWrapperType(Class<?> clazz) {
if ( !clazz.isPrimitive() ) {
throw new IllegalArgumentException( clazz + " is no primitive type." );
}
return PRIMITIVE_TO_WRAPPER_TYPES.get( clazz );
}
public static Class<?> getPrimitiveType(Class<?> clazz) {
if ( clazz.isPrimitive() ) {
throw new IllegalArgumentException( clazz + " is no wrapper type." );
}
return WRAPPER_TO_PRIMITIVE_TYPES.get( clazz );
}
public static boolean isNative(String fullyQualifiedName) {
return ANALYZERS.containsKey( fullyQualifiedName );
}
public static boolean isNumber(Class<?> clazz) {
if ( clazz == null ) {
return false;
}
else {
return NUMBER_TYPES.contains( clazz );
}
}
/**
*
* @param className FQN of the literal native class
* @param literal literal to verify
* @return literal | ShortAnalyzer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IdentifierNameTest.java | {
"start": 7596,
"end": 7709
} | class ____ {
@IAmATest
public void possibly_a_test_name() {}
private @ | Test |
java | redisson__redisson | redisson/src/main/java/org/redisson/liveobject/core/RedissonObjectBuilder.java | {
"start": 5146,
"end": 9636
} | class ____ @This object.
*/
private Codec getFieldCodec(Class<?> rEntity, Class<? extends RObject> rObjectClass, String fieldName) throws ReflectiveOperationException {
Field field = ClassUtils.getDeclaredField(rEntity, fieldName);
if (field.isAnnotationPresent(RObjectField.class)) {
RObjectField anno = field.getAnnotation(RObjectField.class);
return codecProvider.getCodec(anno, rEntity, rObjectClass, fieldName, config);
} else {
REntity anno = ClassUtils.getAnnotation(rEntity, REntity.class);
return codecProvider.getCodec(anno, rEntity, config);
}
}
public NamingScheme getNamingScheme(Class<?> entityClass) {
REntity anno = ClassUtils.getAnnotation(entityClass, REntity.class);
Codec codec = codecProvider.getCodec(anno, entityClass, config);
return getNamingScheme(entityClass, codec);
}
public NamingScheme getNamingScheme(Class<?> rEntity, Codec c) {
REntity anno = ClassUtils.getAnnotation(rEntity, REntity.class);
try {
return anno.namingScheme().getDeclaredConstructor(Codec.class).newInstance(c);
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
private Class<? extends RObject> getMappedClass(Class<?> cls) {
for (Entry<Class<?>, Class<? extends RObject>> entrySet : SUPPORTED_CLASS_MAPPING.entrySet()) {
if (entrySet.getKey().isAssignableFrom(cls)) {
return entrySet.getValue();
}
}
return null;
}
private static void fillCodecMethods(Class<?> clientClazz, Class<?> objectClazz) {
for (Method method : clientClazz.getDeclaredMethods()) {
if (!method.getReturnType().equals(Void.TYPE)
&& objectClazz.isAssignableFrom(method.getReturnType())
&& method.getName().startsWith("get")) {
Class<?> cls = method.getReturnType();
if (method.getParameterTypes().length == 2 //first param is name, second param is codec.
&& String.class == method.getParameterTypes()[0]
&& Codec.class.isAssignableFrom(method.getParameterTypes()[1])) {
CUSTOM_CODEC_REFERENCES.put(cls, method);
} else if (method.getParameterTypes().length == 1
&& String.class == method.getParameterTypes()[0]) {
DEFAULT_CODEC_REFERENCES.put(cls, method);
}
}
}
}
public Object fromReference(RedissonReference rr, ReferenceType type) throws ReflectiveOperationException {
if (type == ReferenceType.REACTIVE) {
return fromReference(redissonReactive, rr);
} else if (type == ReferenceType.RXJAVA) {
return fromReference(redissonRx, rr);
}
return fromReference(redisson, rr);
}
private Object fromReference(RedissonClient redisson, RedissonReference rr) throws ReflectiveOperationException {
Class<?> type = rr.getType();
if (ClassUtils.isAnnotationPresent(type, REntity.class)) {
RedissonLiveObjectService liveObjectService = (RedissonLiveObjectService) redisson.getLiveObjectService();
NamingScheme ns = getNamingScheme(type);
Object id = ns.resolveId(rr.getKeyName());
return liveObjectService.createLiveObject(type, id);
}
return getObject(redisson, rr, type, codecProvider);
}
private Object getObject(Object redisson, RedissonReference rr, Class<?> type,
ReferenceCodecProvider codecProvider) throws ReflectiveOperationException {
if (type != null) {
if (!DEFAULT_CODEC_REFERENCES.containsKey(type) && type.getInterfaces().length > 0) {
type = type.getInterfaces()[0];
}
if (isDefaultCodec(rr)) {
Method m = DEFAULT_CODEC_REFERENCES.get(type);
if (m != null) {
return m.invoke(redisson, rr.getKeyName());
}
} else {
Method m = CUSTOM_CODEC_REFERENCES.get(type);
if (m != null) {
return m.invoke(redisson, rr.getKeyName(), codecProvider.getCodec(rr.getCodecType()));
}
}
}
throw new ClassNotFoundException("No RObject is found to match | of |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/synthetic/removeTypes/SyntheticBeanBuildItemRemoveTypesTest.java | {
"start": 3459,
"end": 3514
} | class ____ implements CharlieInterface {
}
| Charlie |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java | {
"start": 1574,
"end": 27050
} | class ____ {
private static final String EXTRA_RESOURCE_NAME = "test";
private ResourceCalculator resourceCalculator;
public static Collection<Object[]> getParameters() {
return Arrays.asList(new Object[][]{
{"DefaultResourceCalculator", new DefaultResourceCalculator()},
{"DominantResourceCalculator", new DominantResourceCalculator()}});
}
@BeforeEach
public void setupNoExtraResource() {
// This has to run before each test because we don't know when
// setupExtraResource() might be called
ResourceUtils.resetResourceTypes(new Configuration());
}
private static void setupExtraResource() {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RESOURCE_TYPES, EXTRA_RESOURCE_NAME);
ResourceUtils.resetResourceTypes(conf);
}
public void initTestResourceCalculator(String name, ResourceCalculator rs) {
this.resourceCalculator = rs;
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
@Timeout(10000)
void testFitsIn(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
if (resourceCalculator instanceof DefaultResourceCalculator) {
assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 1)));
assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 2)));
assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 2)));
assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 1)));
assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(2, 1), Resource.newInstance(1, 2)));
} else if (resourceCalculator instanceof DominantResourceCalculator) {
assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 1)));
assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 2)));
assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 2)));
assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 1)));
assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(2, 1), Resource.newInstance(1, 2)));
}
}
private Resource newResource(long memory, int cpu) {
Resource res = Resource.newInstance(memory, cpu);
return res;
}
private Resource newResource(long memory, int cpu, int extraResource) {
Resource res = newResource(memory, cpu);
res.setResourceValue(EXTRA_RESOURCE_NAME, extraResource);
return res;
}
/**
* Test that the compare() method returns the expected result (0, -1, or 1).
* If the expected result is not 0, this method will also test the resources
* in the opposite order and check for the negative of the expected result.
*
* @param cluster the cluster resource
* @param res1 the LHS resource
* @param res2 the RHS resource
* @param expected the expected result
*/
private void assertComparison(Resource cluster, Resource res1, Resource res2,
int expected) {
int actual = resourceCalculator.compare(cluster, res1, res2);
assertEquals(expected, actual, String.format("Resource comparison did not give the expected "
+ "result for %s v/s %s", res1.toString(), res2.toString()));
if (expected != 0) {
// Try again with args in the opposite order and the negative of the
// expected result.
actual = resourceCalculator.compare(cluster, res2, res1);
assertEquals(expected * -1, actual, String.format("Resource comparison did not give the "
+ "expected result for %s v/s %s", res2.toString(), res1.toString()));
}
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testCompareWithOnlyMandatory(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
// This test is necessary because there are optimizations that are only
// triggered when only the mandatory resources are configured.
// Keep cluster resources even so that the numbers are easy to understand
Resource cluster = newResource(4, 4);
assertComparison(cluster, newResource(1, 1), newResource(1, 1), 0);
assertComparison(cluster, newResource(0, 0), newResource(0, 0), 0);
assertComparison(cluster, newResource(2, 2), newResource(1, 1), 1);
assertComparison(cluster, newResource(2, 2), newResource(0, 0), 1);
if (resourceCalculator instanceof DefaultResourceCalculator) {
testCompareDefaultWithOnlyMandatory(cluster);
} else if (resourceCalculator instanceof DominantResourceCalculator) {
testCompareDominantWithOnlyMandatory(cluster);
}
}
private void testCompareDefaultWithOnlyMandatory(Resource cluster) {
assertComparison(cluster, newResource(1, 1), newResource(1, 1), 0);
assertComparison(cluster, newResource(1, 2), newResource(1, 1), 0);
assertComparison(cluster, newResource(1, 1), newResource(1, 0), 0);
assertComparison(cluster, newResource(2, 1), newResource(1, 1), 1);
assertComparison(cluster, newResource(2, 1), newResource(1, 2), 1);
assertComparison(cluster, newResource(2, 1), newResource(1, 0), 1);
}
private void testCompareDominantWithOnlyMandatory(Resource cluster) {
assertComparison(cluster, newResource(2, 1), newResource(2, 1), 0);
assertComparison(cluster, newResource(2, 1), newResource(1, 2), 0);
assertComparison(cluster, newResource(2, 1), newResource(1, 1), 1);
assertComparison(cluster, newResource(2, 2), newResource(2, 1), 1);
assertComparison(cluster, newResource(2, 2), newResource(1, 2), 1);
assertComparison(cluster, newResource(3, 1), newResource(3, 0), 1);
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testCompare(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
// Test with 3 resources
setupExtraResource();
// Keep cluster resources even so that the numbers are easy to understand
Resource cluster = newResource(4L, 4, 4);
assertComparison(cluster, newResource(1, 1, 1), newResource(1, 1, 1), 0);
assertComparison(cluster, newResource(0, 0, 0), newResource(0, 0, 0), 0);
assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 1), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(0, 0, 0), 1);
if (resourceCalculator instanceof DefaultResourceCalculator) {
testCompareDefault(cluster);
} else if (resourceCalculator instanceof DominantResourceCalculator) {
testCompareDominant(cluster);
testCompareDominantZeroValueResource();
}
}
private void testCompareDefault(Resource cluster) {
assertComparison(cluster, newResource(1, 1, 2), newResource(1, 1, 1), 0);
assertComparison(cluster, newResource(1, 2, 1), newResource(1, 1, 1), 0);
assertComparison(cluster, newResource(1, 2, 2), newResource(1, 1, 1), 0);
assertComparison(cluster, newResource(1, 2, 2), newResource(1, 0, 0), 0);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 1);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 1);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 2), 1);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 0, 0), 1);
}
/**
* Verify compare when one or all the resource are zero.
*/
private void testCompareDominantZeroValueResource() {
Resource cluster = newResource(4L, 4, 0);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 1);
assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 2), 1);
assertComparison(cluster, newResource(2, 2, 1), newResource(2, 2, 2), 0);
assertComparison(cluster, newResource(0, 2, 1), newResource(0, 2, 2), 0);
assertComparison(cluster, newResource(0, 1, 2), newResource(1, 1, 2), -1);
assertComparison(cluster, newResource(1, 1, 2), newResource(2, 1, 2), -1);
// cluster resource zero
cluster = newResource(0, 0, 0);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 1), 1);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 0);
assertComparison(cluster, newResource(1, 1, 1), newResource(1, 1, 1), 0);
assertComparison(cluster, newResource(1, 1, 1), newResource(1, 1, 2), -1);
assertComparison(cluster, newResource(1, 1, 1), newResource(1, 2, 1), -1);
}
private void testCompareDominant(Resource cluster) {
assertComparison(cluster, newResource(2, 1, 1), newResource(2, 1, 1), 0);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 0);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 0);
assertComparison(cluster, newResource(2, 1, 0), newResource(0, 1, 2), 0);
assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 2), 0);
assertComparison(cluster, newResource(2, 2, 1), newResource(2, 1, 2), 0);
assertComparison(cluster, newResource(2, 2, 1), newResource(2, 2, 1), 0);
assertComparison(cluster, newResource(2, 2, 0), newResource(2, 0, 2), 0);
assertComparison(cluster, newResource(3, 2, 1), newResource(3, 2, 1), 0);
assertComparison(cluster, newResource(3, 2, 1), newResource(3, 1, 2), 0);
assertComparison(cluster, newResource(3, 2, 1), newResource(1, 2, 3), 0);
assertComparison(cluster, newResource(3, 2, 1), newResource(1, 3, 2), 0);
assertComparison(cluster, newResource(3, 2, 1), newResource(2, 1, 3), 0);
assertComparison(cluster, newResource(3, 2, 1), newResource(2, 3, 1), 0);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1);
assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 0), 1);
assertComparison(cluster, newResource(2, 2, 1), newResource(2, 1, 1), 1);
assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 1), 1);
assertComparison(cluster, newResource(2, 2, 1), newResource(1, 1, 2), 1);
assertComparison(cluster, newResource(2, 2, 1), newResource(0, 2, 2), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(2, 1, 1), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(1, 2, 1), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 2), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(2, 2, 1), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(2, 1, 2), 1);
assertComparison(cluster, newResource(2, 2, 2), newResource(1, 2, 2), 1);
assertComparison(cluster, newResource(3, 2, 1), newResource(2, 2, 2), 1);
assertComparison(cluster, newResource(3, 1, 1), newResource(2, 2, 2), 1);
assertComparison(cluster, newResource(3, 1, 1), newResource(3, 1, 0), 1);
assertComparison(cluster, newResource(3, 1, 1), newResource(3, 0, 0), 1);
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
@Timeout(10000)
void testCompareWithEmptyCluster(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
Resource clusterResource = Resource.newInstance(0, 0);
// For lhs == rhs
Resource lhs = Resource.newInstance(0, 0);
Resource rhs = Resource.newInstance(0, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
true, lhs, lhs);
// lhs > rhs
lhs = Resource.newInstance(1, 1);
rhs = Resource.newInstance(0, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
true, lhs, rhs);
// For lhs < rhs
lhs = Resource.newInstance(0, 0);
rhs = Resource.newInstance(1, 1);
assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
false, rhs, lhs);
if (!(resourceCalculator instanceof DominantResourceCalculator)) {
return;
}
// verify for 2 dimensional resources i.e memory and cpu
// dominant resource types
lhs = Resource.newInstance(1, 0);
rhs = Resource.newInstance(0, 1);
assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
true, lhs, lhs);
lhs = Resource.newInstance(0, 1);
rhs = Resource.newInstance(1, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
true, lhs, lhs);
lhs = Resource.newInstance(1, 1);
rhs = Resource.newInstance(1, 0);
assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
true, lhs, rhs);
lhs = Resource.newInstance(0, 1);
rhs = Resource.newInstance(1, 1);
assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
false, rhs, lhs);
}
private void assertResourcesOperations(Resource clusterResource,
Resource lhs, Resource rhs, boolean lessThan, boolean lessThanOrEqual,
boolean greaterThan, boolean greaterThanOrEqual, Resource max,
Resource min) {
assertEquals(lessThan,
Resources.lessThan(resourceCalculator, clusterResource, lhs, rhs),
"Less Than operation is wrongly calculated.");
assertEquals(
lessThanOrEqual, Resources.lessThanOrEqual(resourceCalculator,
clusterResource, lhs, rhs), "Less Than Or Equal To operation is wrongly calculated.");
assertEquals(greaterThan,
Resources.greaterThan(resourceCalculator, clusterResource, lhs, rhs),
"Greater Than operation is wrongly calculated.");
assertEquals(greaterThanOrEqual,
Resources.greaterThanOrEqual(resourceCalculator, clusterResource, lhs, rhs),
"Greater Than Or Equal To operation is wrongly calculated.");
assertEquals(max,
Resources.max(resourceCalculator, clusterResource, lhs, rhs),
"Max(value) Operation wrongly calculated.");
assertEquals(min,
Resources.min(resourceCalculator, clusterResource, lhs, rhs),
"Min(value) operation is wrongly calculated.");
}
/**
* Test resource normalization.
*/
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
@Timeout(10000)
void testNormalize(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
// requested resources value cannot be an arbitrary number.
Resource ask = Resource.newInstance(1111, 2);
Resource min = Resource.newInstance(1024, 1);
Resource max = Resource.newInstance(8 * 1024, 8);
Resource increment = Resource.newInstance(1024, 4);
if (resourceCalculator instanceof DefaultResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(2 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(2 * 1024, result.getMemorySize());
assertEquals(4, result.getVirtualCores());
}
// if resources asked are less than minimum resource, then normalize it to
// minimum resource.
ask = Resource.newInstance(512, 0);
min = Resource.newInstance(2 * 1024, 2);
max = Resource.newInstance(8 * 1024, 8);
increment = Resource.newInstance(1024, 1);
if (resourceCalculator instanceof DefaultResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(2 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(2 * 1024, result.getMemorySize());
assertEquals(2, result.getVirtualCores());
}
// if resources asked are larger than maximum resource, then normalize it to
// maximum resources.
ask = Resource.newInstance(9 * 1024, 9);
min = Resource.newInstance(2 * 1024, 2);
max = Resource.newInstance(8 * 1024, 8);
increment = Resource.newInstance(1024, 1);
if (resourceCalculator instanceof DefaultResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(8 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(8 * 1024, result.getMemorySize());
assertEquals(8, result.getVirtualCores());
}
// if increment is 0, use minimum resource as the increment resource.
ask = Resource.newInstance(1111, 2);
min = Resource.newInstance(2 * 1024, 2);
max = Resource.newInstance(8 * 1024, 8);
increment = Resource.newInstance(0, 0);
if (resourceCalculator instanceof DefaultResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(2 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
assertEquals(2 * 1024, result.getMemorySize());
assertEquals(2, result.getVirtualCores());
}
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testDivisionByZeroRatioDenominatorIsZero(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
float ratio = resourceCalculator.ratio(newResource(1, 1), newResource(0,
0));
assertEquals(Float.POSITIVE_INFINITY, ratio, 0.00001);
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testDivisionByZeroRatioNumeratorAndDenominatorIsZero(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
float ratio = resourceCalculator.ratio(newResource(0, 0), newResource(0,
0));
assertEquals(0.0, ratio, 0.00001);
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testFitsInDiagnosticsCollector(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
if (resourceCalculator instanceof DefaultResourceCalculator) {
// required-resource = (0, 0)
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(0, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(0, 1)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(1, 1)));
// required-resource = (0, 1)
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(0, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(0, 1)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(1, 1)));
// required-resource = (1, 0)
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(0, 0)));
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(0, 1)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(1, 1)));
// required-resource = (1, 1)
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(0, 0)));
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(0, 1)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(1, 1)));
} else if (resourceCalculator instanceof DominantResourceCalculator) {
// required-resource = (0, 0)
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(0, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(0, 1)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 0),
newResource(1, 1)));
// required-resource = (0, 1)
assertEquals(ImmutableSet.of(ResourceInformation.VCORES_URI),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(0, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(0, 1)));
assertEquals(ImmutableSet.of(ResourceInformation.VCORES_URI),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(0, 1),
newResource(1, 1)));
// required-resource = (1, 0)
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(0, 0)));
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(0, 1)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 0),
newResource(1, 1)));
// required-resource = (1, 1)
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI,
ResourceInformation.VCORES_URI), resourceCalculator
.getInsufficientResourceNames(newResource(1, 1), newResource(0, 0)));
assertEquals(ImmutableSet.of(ResourceInformation.MEMORY_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(0, 1)));
assertEquals(ImmutableSet.of(ResourceInformation.VCORES_URI),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(1, 0)));
assertEquals(ImmutableSet.of(),
resourceCalculator.getInsufficientResourceNames(newResource(1, 1),
newResource(1, 1)));
}
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testRatioWithNoExtraResource(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
//setup
Resource resource1 = newResource(1, 1);
Resource resource2 = newResource(2, 1);
//act
float ratio = resourceCalculator.ratio(resource1, resource2);
//assert
if (resourceCalculator instanceof DefaultResourceCalculator) {
double ratioOfMemories = 0.5;
assertEquals(ratioOfMemories, ratio, 0.00001);
} else if (resourceCalculator instanceof DominantResourceCalculator) {
double ratioOfCPUs = 1.0;
assertEquals(ratioOfCPUs, ratio, 0.00001);
}
}
@MethodSource("getParameters")
@ParameterizedTest(name = "{0}")
void testRatioWithExtraResource(String name, ResourceCalculator rs) {
initTestResourceCalculator(name, rs);
//setup
setupExtraResource();
Resource resource1 = newResource(1, 1, 2);
Resource resource2 = newResource(2, 1, 1);
//act
float ratio = resourceCalculator.ratio(resource1, resource2);
//assert
if (resourceCalculator instanceof DefaultResourceCalculator) {
double ratioOfMemories = 0.5;
assertEquals(ratioOfMemories, ratio, 0.00001);
} else if (resourceCalculator instanceof DominantResourceCalculator) {
double ratioOfExtraResources = 2.0;
assertEquals(ratioOfExtraResources, ratio, 0.00001);
}
}
} | TestResourceCalculator |
java | apache__camel | components/camel-undertow/src/test/java/org/apache/camel/component/undertow/rest/RestUndertowProducerEncodingTest.java | {
"start": 1066,
"end": 3034
} | class ____ extends BaseUndertowTest {
@Test
public void testSelect() {
template.sendBody("rest:get:bw-web-api/v1/objects/timesheets?companyId=RD&select=personId,personName", "Hello World");
}
@Test
public void testFilter() {
template.sendBody("rest:get:bw-web-api/v1/objects/timesheets?companyId=RD&select=personId,personName"
+ "&filter=date(time/date) ge 2020-06-01 and personId eq 'R10019'",
"Bye World");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// configure to use undertow on localhost with the given port
restConfiguration().component("undertow").host("localhost").port(getPort());
// use the rest DSL to define the rest services
rest("/bw-web-api/v1/objects")
.get("{action}")
.to("direct:action");
from("direct:action")
.process(exchange -> {
String action = exchange.getIn().getHeader("action", String.class);
assertEquals("timesheets", action);
String select = exchange.getIn().getHeader("select", String.class);
assertEquals("personId,personName", select);
String cid = exchange.getIn().getHeader("companyId", String.class);
assertEquals("RD", cid);
String filter = exchange.getIn().getHeader("filter", String.class);
if (filter != null) {
assertEquals("date(time/date) ge 2020-06-01 and personId eq 'R10019'", filter);
}
});
}
};
}
}
| RestUndertowProducerEncodingTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationTests.java | {
"start": 635,
"end": 3565
} | class ____ extends AbstractBWCSerializationTestCase<BertJapaneseTokenization> {
private boolean lenient;
public static BertJapaneseTokenization mutateForVersion(BertJapaneseTokenization instance, TransportVersion version) {
if (version.before(TransportVersions.V_8_2_0)) {
return new BertJapaneseTokenization(
instance.doLowerCase,
instance.withSpecialTokens,
instance.maxSequenceLength,
instance.truncate,
null
);
}
return instance;
}
@Before
public void chooseStrictOrLenient() {
lenient = randomBoolean();
}
@Override
protected BertJapaneseTokenization doParseInstance(XContentParser parser) throws IOException {
return BertJapaneseTokenization.createJpParser(lenient).apply(parser, null);
}
@Override
protected Writeable.Reader<BertJapaneseTokenization> instanceReader() {
return BertJapaneseTokenization::new;
}
@Override
protected BertJapaneseTokenization createTestInstance() {
return createRandom();
}
@Override
protected BertJapaneseTokenization mutateInstance(BertJapaneseTokenization instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected BertJapaneseTokenization mutateInstanceForVersion(BertJapaneseTokenization instance, TransportVersion version) {
return mutateForVersion(instance, version);
}
public void testsBuildUpdatedTokenization() {
var update = new BertJapaneseTokenization(true, true, 100, Tokenization.Truncate.FIRST, -1).buildWindowingTokenization(50, 20);
assertEquals(Tokenization.Truncate.NONE, update.getTruncate());
assertEquals(50, update.maxSequenceLength());
assertEquals(20, update.getSpan());
}
public static BertJapaneseTokenization createRandom() {
return new BertJapaneseTokenization(
randomBoolean() ? null : randomBoolean(),
randomBoolean() ? null : randomBoolean(),
randomBoolean() ? null : randomIntBetween(1, 1024),
randomBoolean() ? null : randomFrom(Tokenization.Truncate.values()),
null
);
}
public static BertJapaneseTokenization createRandomWithSpan() {
Tokenization.Truncate truncate = randomBoolean() ? null : randomFrom(Tokenization.Truncate.values());
Integer maxSeq = randomBoolean() ? null : randomIntBetween(1, 1024);
return new BertJapaneseTokenization(
randomBoolean() ? null : randomBoolean(),
randomBoolean() ? null : randomBoolean(),
maxSeq,
truncate,
Tokenization.Truncate.NONE.equals(truncate) && randomBoolean() ? randomIntBetween(0, maxSeq != null ? maxSeq - 1 : 100) : null
);
}
}
| BertJapaneseTokenizationTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockCountersInPendingIBR.java | {
"start": 2039,
"end": 5801
} | class ____ {
@Test
public void testBlockCounters() throws Exception {
final Configuration conf = new HdfsConfiguration();
/*
* Set a really long value for dfs.blockreport.intervalMsec and
* dfs.heartbeat.interval, so that incremental block reports and heartbeats
* won't be sent during this test unless they're triggered manually.
*/
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DatanodeProtocolClientSideTranslatorPB spy =
InternalDataNodeTestUtils.spyOnBposToNN(
cluster.getDataNodes().get(0), cluster.getNameNode());
final DataNode datanode = cluster.getDataNodes().get(0);
/* We should get 0 incremental block report. */
Mockito.verify(spy, timeout(60000).times(0)).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
/*
* Create fake blocks notification on the DataNode. This will be sent with
* the next incremental block report.
*/
final BPServiceActor actor =
datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
final FsDatasetSpi<?> dataset = datanode.getFSDataset();
final DatanodeStorage storage;
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
storage = dataset.getStorage(volumes.get(0).getStorageID());
}
ReceivedDeletedBlockInfo rdbi = null;
/* block at status of RECEIVING_BLOCK */
rdbi = new ReceivedDeletedBlockInfo(
new Block(5678, 512, 1000), BlockStatus.RECEIVING_BLOCK, null);
actor.getIbrManager().addRDBI(rdbi, storage);
/* block at status of RECEIVED_BLOCK */
rdbi = new ReceivedDeletedBlockInfo(
new Block(5679, 512, 1000), BlockStatus.RECEIVED_BLOCK, null);
actor.getIbrManager().addRDBI(rdbi, storage);
/* block at status of DELETED_BLOCK */
rdbi = new ReceivedDeletedBlockInfo(
new Block(5680, 512, 1000), BlockStatus.DELETED_BLOCK, null);
actor.getIbrManager().addRDBI(rdbi, storage);
/* verify counters before sending IBR */
verifyBlockCounters(datanode, 3, 1, 1, 1);
/* Manually trigger a block report. */
datanode.triggerBlockReport(
new BlockReportOptions.Factory().
setIncremental(true).
build()
);
/*
* triggerBlockReport returns before the block report is actually sent. Wait
* for it to be sent here.
*/
Mockito.verify(spy, timeout(60000).times(1)).
blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
/* verify counters after sending IBR */
verifyBlockCounters(datanode, 0, 0, 0, 0);
cluster.shutdown();
}
private void verifyBlockCounters(final DataNode datanode,
final long blocksInPendingIBR, final long blocksReceivingInPendingIBR,
final long blocksReceivedInPendingIBR,
final long blocksDeletedInPendingIBR) {
final MetricsRecordBuilder m = MetricsAsserts
.getMetrics(datanode.getMetrics().name());
MetricsAsserts.assertGauge("BlocksInPendingIBR",
blocksInPendingIBR, m);
MetricsAsserts.assertGauge("BlocksReceivingInPendingIBR",
blocksReceivingInPendingIBR, m);
MetricsAsserts.assertGauge("BlocksReceivedInPendingIBR",
blocksReceivedInPendingIBR, m);
MetricsAsserts.assertGauge("BlocksDeletedInPendingIBR",
blocksDeletedInPendingIBR, m);
}
}
| TestBlockCountersInPendingIBR |
java | spring-projects__spring-boot | core/spring-boot-properties-migrator/src/main/java/org/springframework/boot/context/properties/migrator/PropertiesMigrationReport.java | {
"start": 4152,
"end": 4630
} | class ____ {
private final List<PropertyMigration> properties;
LegacyProperties(List<PropertyMigration> properties) {
this.properties = new ArrayList<>(properties);
}
List<PropertyMigration> getRenamed() {
return this.properties.stream().filter(PropertyMigration::isCompatibleType).toList();
}
List<PropertyMigration> getUnsupported() {
return this.properties.stream().filter((property) -> !property.isCompatibleType()).toList();
}
}
}
| LegacyProperties |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java | {
"start": 1050,
"end": 1306
} | class ____ {
private static final Logger logger = LogManager.getLogger(DatafeedTimingStatsReporter.class);
/** Interface used for persisting current timing stats to the results index. */
@FunctionalInterface
public | DatafeedTimingStatsReporter |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/subjects/PublishSubject.java | {
"start": 5093,
"end": 10484
} | class ____<T> extends Subject<T> {
/** The terminated indicator for the subscribers array. */
@SuppressWarnings("rawtypes")
static final PublishDisposable[] TERMINATED = new PublishDisposable[0];
/** An empty subscribers array to avoid allocating it all the time. */
@SuppressWarnings("rawtypes")
static final PublishDisposable[] EMPTY = new PublishDisposable[0];
/** The array of currently subscribed subscribers. */
final AtomicReference<PublishDisposable<T>[]> subscribers;
/** The error, write before terminating and read after checking subscribers. */
Throwable error;
/**
* Constructs a PublishSubject.
* @param <T> the value type
* @return the new PublishSubject
*/
@CheckReturnValue
@NonNull
public static <T> PublishSubject<T> create() {
return new PublishSubject<>();
}
/**
* Constructs a PublishSubject.
* @since 2.0
*/
@SuppressWarnings("unchecked")
PublishSubject() {
subscribers = new AtomicReference<>(EMPTY);
}
@Override
protected void subscribeActual(Observer<? super T> t) {
PublishDisposable<T> ps = new PublishDisposable<>(t, this);
t.onSubscribe(ps);
if (add(ps)) {
// if cancellation happened while a successful add, the remove() didn't work
// so we need to do it again
if (ps.isDisposed()) {
remove(ps);
}
} else {
Throwable ex = error;
if (ex != null) {
t.onError(ex);
} else {
t.onComplete();
}
}
}
/**
* Tries to add the given subscriber to the subscribers array atomically
* or returns false if the subject has terminated.
* @param ps the subscriber to add
* @return true if successful, false if the subject has terminated
*/
boolean add(PublishDisposable<T> ps) {
for (;;) {
PublishDisposable<T>[] a = subscribers.get();
if (a == TERMINATED) {
return false;
}
int n = a.length;
@SuppressWarnings("unchecked")
PublishDisposable<T>[] b = new PublishDisposable[n + 1];
System.arraycopy(a, 0, b, 0, n);
b[n] = ps;
if (subscribers.compareAndSet(a, b)) {
return true;
}
}
}
/**
* Atomically removes the given subscriber if it is subscribed to the subject.
* @param ps the subject to remove
*/
@SuppressWarnings("unchecked")
void remove(PublishDisposable<T> ps) {
for (;;) {
PublishDisposable<T>[] a = subscribers.get();
if (a == TERMINATED || a == EMPTY) {
return;
}
int n = a.length;
int j = -1;
for (int i = 0; i < n; i++) {
if (a[i] == ps) {
j = i;
break;
}
}
if (j < 0) {
return;
}
PublishDisposable<T>[] b;
if (n == 1) {
b = EMPTY;
} else {
b = new PublishDisposable[n - 1];
System.arraycopy(a, 0, b, 0, j);
System.arraycopy(a, j + 1, b, j, n - j - 1);
}
if (subscribers.compareAndSet(a, b)) {
return;
}
}
}
@Override
public void onSubscribe(Disposable d) {
if (subscribers.get() == TERMINATED) {
d.dispose();
}
}
@Override
public void onNext(T t) {
ExceptionHelper.nullCheck(t, "onNext called with a null value.");
for (PublishDisposable<T> pd : subscribers.get()) {
pd.onNext(t);
}
}
@SuppressWarnings("unchecked")
@Override
public void onError(Throwable t) {
ExceptionHelper.nullCheck(t, "onError called with a null Throwable.");
if (subscribers.get() == TERMINATED) {
RxJavaPlugins.onError(t);
return;
}
error = t;
for (PublishDisposable<T> pd : subscribers.getAndSet(TERMINATED)) {
pd.onError(t);
}
}
@SuppressWarnings("unchecked")
@Override
public void onComplete() {
if (subscribers.get() == TERMINATED) {
return;
}
for (PublishDisposable<T> pd : subscribers.getAndSet(TERMINATED)) {
pd.onComplete();
}
}
@Override
@CheckReturnValue
public boolean hasObservers() {
return subscribers.get().length != 0;
}
@Override
@Nullable
@CheckReturnValue
public Throwable getThrowable() {
if (subscribers.get() == TERMINATED) {
return error;
}
return null;
}
@Override
@CheckReturnValue
public boolean hasThrowable() {
return subscribers.get() == TERMINATED && error != null;
}
@Override
@CheckReturnValue
public boolean hasComplete() {
return subscribers.get() == TERMINATED && error == null;
}
/**
* Wraps the actual subscriber, tracks its requests and makes cancellation
* to remove itself from the current subscribers array.
*
* @param <T> the value type
*/
static final | PublishSubject |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/codec/ByteArrayEncoder.java | {
"start": 1153,
"end": 2363
} | class ____ extends AbstractEncoder<byte[]> {
public ByteArrayEncoder() {
super(MimeTypeUtils.ALL);
}
@Override
public boolean canEncode(ResolvableType elementType, @Nullable MimeType mimeType) {
Class<?> clazz = elementType.toClass();
return super.canEncode(elementType, mimeType) && byte[].class.isAssignableFrom(clazz);
}
@Override
public Flux<DataBuffer> encode(Publisher<? extends byte[]> inputStream,
DataBufferFactory bufferFactory, ResolvableType elementType, @Nullable MimeType mimeType,
@Nullable Map<String, Object> hints) {
// Use (byte[] bytes) for Eclipse
return Flux.from(inputStream).map((byte[] bytes) ->
encodeValue(bytes, bufferFactory, elementType, mimeType, hints));
}
@Override
public DataBuffer encodeValue(byte[] bytes, DataBufferFactory bufferFactory,
ResolvableType valueType, @Nullable MimeType mimeType, @Nullable Map<String, Object> hints) {
DataBuffer dataBuffer = bufferFactory.wrap(bytes);
if (logger.isDebugEnabled() && !Hints.isLoggingSuppressed(hints)) {
String logPrefix = Hints.getLogPrefix(hints);
logger.debug(logPrefix + "Writing " + dataBuffer.readableByteCount() + " bytes");
}
return dataBuffer;
}
}
| ByteArrayEncoder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/serialization/entity/WithEmbeddedId.java | {
"start": 272,
"end": 458
} | class ____ be in a package that is different from the test
* so that the test does not have access to the private embedded ID.
*
* @author Gail Badner
*/
@Entity
@Cacheable
public | should |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/http/codec/xml/Jaxb2XmlEncoder.java | {
"start": 3738,
"end": 5458
} | class
____ Mono.fromCallable(() -> encodeValue(value, bufferFactory, valueType, mimeType, hints)).flux();
}
@Override
public DataBuffer encodeValue(Object value, DataBufferFactory bufferFactory,
ResolvableType valueType, @Nullable MimeType mimeType, @Nullable Map<String, Object> hints) {
if (!Hints.isLoggingSuppressed(hints)) {
LogFormatUtils.traceDebug(logger, traceOn -> {
String formatted = LogFormatUtils.formatValue(value, !traceOn);
return Hints.getLogPrefix(hints) + "Encoding [" + formatted + "]";
});
}
boolean release = true;
DataBuffer buffer = bufferFactory.allocateBuffer(1024);
try {
OutputStream outputStream = buffer.asOutputStream();
Class<?> clazz = getMarshallerType(value);
Marshaller marshaller = initMarshaller(clazz);
marshaller.marshal(value, outputStream);
release = false;
return buffer;
}
catch (MarshalException ex) {
throw new EncodingException("Could not marshal " + value.getClass() + " to XML", ex);
}
catch (JAXBException ex) {
throw new CodecException("Invalid JAXB configuration", ex);
}
finally {
if (release) {
DataBufferUtils.release(buffer);
}
}
}
private static Class<?> getMarshallerType(Object value) {
if (value instanceof JAXBElement<?> jaxbElement) {
return jaxbElement.getDeclaredType();
}
else {
return ClassUtils.getUserClass(value);
}
}
private Marshaller initMarshaller(Class<?> clazz) throws CodecException, JAXBException {
Marshaller marshaller = this.jaxbContexts.createMarshaller(clazz);
marshaller.setProperty(Marshaller.JAXB_ENCODING, StandardCharsets.UTF_8.name());
marshaller = this.marshallerProcessor.apply(marshaller);
return marshaller;
}
}
| return |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java | {
"start": 1479,
"end": 11615
} | class ____ implements ConnectionManager {
private static final Logger logger = LogManager.getLogger(RemoteConnectionManager.class);
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RemoteConnectionManager.class);
private final String clusterAlias;
private final RemoteClusterCredentialsManager credentialsManager;
private final ConnectionManager delegate;
private final AtomicLong counter = new AtomicLong();
private volatile List<DiscoveryNode> connectedNodes = Collections.emptyList();
RemoteConnectionManager(String clusterAlias, RemoteClusterCredentialsManager credentialsManager, ConnectionManager delegate) {
this.clusterAlias = clusterAlias;
this.credentialsManager = credentialsManager;
this.delegate = delegate;
this.delegate.addListener(new TransportConnectionListener() {
@Override
public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) {
addConnectedNode(node);
try {
// called when a node is successfully connected through a proxy connection
maybeLogDeprecationWarning(wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager));
} catch (Exception e) {
logger.warn("Failed to log deprecation warning.", e);
}
}
@Override
public void onNodeDisconnected(DiscoveryNode node, @Nullable Exception closeException) {
removeConnectedNode(node);
}
});
}
public RemoteClusterCredentialsManager getCredentialsManager() {
return credentialsManager;
}
/**
* Remote cluster connections have a different lifecycle from intra-cluster connections. Use {@link #connectToRemoteClusterNode}
* instead of this method.
*/
@Override
public final void connectToNode(
DiscoveryNode node,
ConnectionProfile connectionProfile,
ConnectionValidator connectionValidator,
ActionListener<Releasable> listener
) throws ConnectTransportException {
// it's a mistake to call this expecting a useful Releasable back, we never release remote cluster connections today.
assert false : "use connectToRemoteClusterNode instead";
listener.onFailure(new UnsupportedOperationException("use connectToRemoteClusterNode instead"));
}
public void connectToRemoteClusterNode(DiscoveryNode node, ConnectionValidator connectionValidator, ActionListener<Void> listener)
throws ConnectTransportException {
delegate.connectToNode(node, null, connectionValidator, listener.map(connectionReleasable -> {
// We drop the connectionReleasable here but it's not really a leak: we never close individual connections to a remote cluster
// ourselves - instead we close the whole connection manager if the remote cluster is removed, which bypasses any refcounting
// and just closes the underlying channels.
return null;
}));
}
@Override
public void addListener(TransportConnectionListener listener) {
delegate.addListener(listener);
}
@Override
public void removeListener(TransportConnectionListener listener) {
delegate.removeListener(listener);
}
@Override
public void openConnection(DiscoveryNode node, @Nullable ConnectionProfile profile, ActionListener<Transport.Connection> listener) {
assert profile == null || profile.getTransportProfile().equals(getConnectionProfile().getTransportProfile())
: "A single remote connection manager can only ever handle a single transport profile";
delegate.openConnection(
node,
profile,
listener.delegateFailureAndWrap(
(l, connection) -> l.onResponse(
maybeLogDeprecationWarning(wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager))
)
)
);
}
private InternalRemoteConnection maybeLogDeprecationWarning(InternalRemoteConnection connection) {
if (connection.getClusterCredentials() == null
&& (false == REMOTE_CLUSTER_PROFILE.equals(this.getConnectionProfile().getTransportProfile()))) {
deprecationLogger.warn(
DeprecationCategory.SECURITY,
"remote_cluster_certificate_access-" + connection.getClusterAlias(),
"The remote cluster connection to [{}] is using the certificate-based security model. "
+ "The certificate-based security model is deprecated and will be removed in a future major version. "
+ "Migrate the remote cluster from the certificate-based to the API key-based security model.",
connection.getClusterAlias()
);
}
return connection;
}
@Override
public Transport.Connection getConnection(DiscoveryNode node) {
try {
return getConnectionInternal(node);
} catch (NodeNotConnectedException e) {
return new ProxyConnection(getAnyRemoteConnection(), node);
}
}
@Override
public boolean nodeConnected(DiscoveryNode node) {
return delegate.nodeConnected(node);
}
@Override
public void disconnectFromNode(DiscoveryNode node) {
delegate.disconnectFromNode(node);
}
@Override
public ConnectionProfile getConnectionProfile() {
return delegate.getConnectionProfile();
}
public Transport.Connection getAnyRemoteConnection() {
List<DiscoveryNode> localConnectedNodes = this.connectedNodes;
long curr;
while ((curr = counter.incrementAndGet()) == Long.MIN_VALUE)
;
if (localConnectedNodes.isEmpty() == false) {
DiscoveryNode nextNode = localConnectedNodes.get(Math.floorMod(curr, localConnectedNodes.size()));
try {
return getConnectionInternal(nextNode);
} catch (NodeNotConnectedException e) {
// Ignore. We will manually create an iterator of open nodes
}
}
Set<DiscoveryNode> allConnectionNodes = getAllConnectedNodes();
for (DiscoveryNode connectedNode : allConnectionNodes) {
try {
return getConnectionInternal(connectedNode);
} catch (NodeNotConnectedException e) {
// Ignore. We will try the next one until all are exhausted.
}
}
throw new ConnectTransportException(null, "Unable to connect to [" + clusterAlias + "]");
}
@Override
public Set<DiscoveryNode> getAllConnectedNodes() {
return delegate.getAllConnectedNodes();
}
@Override
public int size() {
return delegate.size();
}
@Override
public void close() {
delegate.closeNoBlock();
}
@Override
public void closeNoBlock() {
delegate.closeNoBlock();
}
/**
* This method returns a remote cluster alias for the given transport connection if it targets a node in the remote cluster.
* This method will return an optional empty in case the connection targets the local node or the node in the local cluster.
*
* @param connection the transport connection for which to resolve a remote cluster alias
* @return a cluster alias if the connection target a node in the remote cluster, otherwise an empty result
*/
public static Optional<String> resolveRemoteClusterAlias(Transport.Connection connection) {
return resolveRemoteClusterAliasWithCredentials(connection).map(RemoteClusterAliasWithCredentials::clusterAlias);
}
public record RemoteClusterAliasWithCredentials(String clusterAlias, @Nullable SecureString credentials) {
@Override
public String toString() {
return "RemoteClusterAliasWithCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}";
}
}
/**
* This method returns information (alias and credentials) for remote cluster for the given transport connection.
* Either or both of alias and credentials can be null depending on the connection.
*
* @param connection the transport connection for which to resolve a remote cluster alias
*/
public static Optional<RemoteClusterAliasWithCredentials> resolveRemoteClusterAliasWithCredentials(Transport.Connection connection) {
Transport.Connection unwrapped = TransportService.unwrapConnection(connection);
if (unwrapped instanceof InternalRemoteConnection remoteConnection) {
return Optional.of(
new RemoteClusterAliasWithCredentials(remoteConnection.getClusterAlias(), remoteConnection.getClusterCredentials())
);
}
return Optional.empty();
}
private Transport.Connection getConnectionInternal(DiscoveryNode node) throws NodeNotConnectedException {
Transport.Connection connection = delegate.getConnection(node);
return wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager);
}
private synchronized void addConnectedNode(DiscoveryNode addedNode) {
this.connectedNodes = CollectionUtils.appendToCopy(this.connectedNodes, addedNode);
}
private synchronized void removeConnectedNode(DiscoveryNode removedNode) {
int newSize = this.connectedNodes.size() - 1;
ArrayList<DiscoveryNode> newConnectedNodes = new ArrayList<>(newSize);
for (DiscoveryNode connectedNode : this.connectedNodes) {
if (connectedNode.equals(removedNode) == false) {
newConnectedNodes.add(connectedNode);
}
}
assert newConnectedNodes.size() == newSize : "Expected connection node count: " + newSize + ", Found: " + newConnectedNodes.size();
this.connectedNodes = Collections.unmodifiableList(newConnectedNodes);
}
static final | RemoteConnectionManager |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheck.java | {
"start": 896,
"end": 1360
} | class ____ loaded by the right classloader.
*/
public static void checkClassLoader(Class cls,
boolean shouldBeLoadedByAppClassLoader) {
boolean loadedByAppClassLoader =
cls.getClassLoader() instanceof ApplicationClassLoader;
if ((shouldBeLoadedByAppClassLoader && !loadedByAppClassLoader) ||
(!shouldBeLoadedByAppClassLoader && loadedByAppClassLoader)) {
throw new RuntimeException("incorrect classloader used");
}
}
}
| is |
java | apache__kafka | group-coordinator/src/main/java/org/apache/kafka/coordinator/group/GroupCoordinatorRecordHelpers.java | {
"start": 4224,
"end": 30837
} | class ____ {
private static final short GROUP_METADATA_VALUE_VERSION = 3;
private GroupCoordinatorRecordHelpers() {}
/**
* Creates a ConsumerGroupMemberMetadata record.
*
* @param groupId The consumer group id.
* @param member The consumer group member.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupMemberSubscriptionRecord(
String groupId,
ConsumerGroupMember member
) {
List<String> topicNames = new ArrayList<>(member.subscribedTopicNames());
Collections.sort(topicNames);
return CoordinatorRecord.record(
new ConsumerGroupMemberMetadataKey()
.setGroupId(groupId)
.setMemberId(member.memberId()),
new ApiMessageAndVersion(
new ConsumerGroupMemberMetadataValue()
.setRackId(member.rackId())
.setInstanceId(member.instanceId())
.setClientId(member.clientId())
.setClientHost(member.clientHost())
.setSubscribedTopicNames(topicNames)
.setSubscribedTopicRegex(member.subscribedTopicRegex())
.setServerAssignor(member.serverAssignorName().orElse(null))
.setRebalanceTimeoutMs(member.rebalanceTimeoutMs())
.setClassicMemberMetadata(member.classicMemberMetadata().orElse(null)),
(short) 0
)
);
}
/**
* Creates a ConsumerGroupMemberMetadata tombstone.
*
* @param groupId The consumer group id.
* @param memberId The consumer group member id.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupMemberSubscriptionTombstoneRecord(
String groupId,
String memberId
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupMemberMetadataKey()
.setGroupId(groupId)
.setMemberId(memberId)
);
}
/**
* Creates a ConsumerGroupPartitionMetadata tombstone.
*
* @param groupId The consumer group id.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupSubscriptionMetadataTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupPartitionMetadataKey()
.setGroupId(groupId)
);
}
/**
* Creates a ConsumerGroupMetadata record.
*
* @param groupId The consumer group id.
* @param newGroupEpoch The consumer group epoch.
* @param metadataHash The consumer group metadata hash.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupEpochRecord(
String groupId,
int newGroupEpoch,
long metadataHash
) {
return CoordinatorRecord.record(
new ConsumerGroupMetadataKey()
.setGroupId(groupId),
new ApiMessageAndVersion(
new ConsumerGroupMetadataValue()
.setEpoch(newGroupEpoch)
.setMetadataHash(metadataHash),
(short) 0
)
);
}
/**
* Creates a ConsumerGroupMetadata tombstone.
*
* @param groupId The consumer group id.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupEpochTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupMetadataKey()
.setGroupId(groupId)
);
}
/**
* Creates a ConsumerGroupTargetAssignmentMember record.
*
* @param groupId The consumer group id.
* @param memberId The consumer group member id.
* @param partitions The target partitions of the member.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupTargetAssignmentRecord(
String groupId,
String memberId,
Map<Uuid, Set<Integer>> partitions
) {
List<ConsumerGroupTargetAssignmentMemberValue.TopicPartition> topicPartitions =
new ArrayList<>(partitions.size());
for (Map.Entry<Uuid, Set<Integer>> entry : partitions.entrySet()) {
topicPartitions.add(
new ConsumerGroupTargetAssignmentMemberValue.TopicPartition()
.setTopicId(entry.getKey())
.setPartitions(new ArrayList<>(entry.getValue()))
);
}
return CoordinatorRecord.record(
new ConsumerGroupTargetAssignmentMemberKey()
.setGroupId(groupId)
.setMemberId(memberId),
new ApiMessageAndVersion(
new ConsumerGroupTargetAssignmentMemberValue()
.setTopicPartitions(topicPartitions),
(short) 0
)
);
}
/**
* Creates a ConsumerGroupTargetAssignmentMember tombstone.
*
* @param groupId The consumer group id.
* @param memberId The consumer group member id.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupTargetAssignmentTombstoneRecord(
String groupId,
String memberId
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupTargetAssignmentMemberKey()
.setGroupId(groupId)
.setMemberId(memberId)
);
}
/**
* Creates a ConsumerGroupTargetAssignmentMetadata record.
*
* @param groupId The consumer group id.
* @param assignmentEpoch The consumer group epoch.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupTargetAssignmentEpochRecord(
String groupId,
int assignmentEpoch
) {
return CoordinatorRecord.record(
new ConsumerGroupTargetAssignmentMetadataKey()
.setGroupId(groupId),
new ApiMessageAndVersion(
new ConsumerGroupTargetAssignmentMetadataValue()
.setAssignmentEpoch(assignmentEpoch),
(short) 0
)
);
}
/**
* Creates a ConsumerGroupTargetAssignmentMetadata tombstone.
*
* @param groupId The consumer group id.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupTargetAssignmentEpochTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupTargetAssignmentMetadataKey()
.setGroupId(groupId)
);
}
    /**
     * Creates a ConsumerGroupCurrentMemberAssignment record.
     *
     * @param groupId The consumer group id.
     * @param member The consumer group member whose current assignment is persisted.
     * @return The record.
     */
    public static CoordinatorRecord newConsumerGroupCurrentAssignmentRecord(
        String groupId,
        ConsumerGroupMember member
    ) {
        return CoordinatorRecord.record(
            // Key: the record is scoped to a single member of the group.
            new ConsumerGroupCurrentMemberAssignmentKey()
                .setGroupId(groupId)
                .setMemberId(member.memberId()),
            new ApiMessageAndVersion(
                // Value: the member's epochs, state, plus its assigned and
                // pending-revocation partition sets in the generated list form.
                new ConsumerGroupCurrentMemberAssignmentValue()
                    .setMemberEpoch(member.memberEpoch())
                    .setPreviousMemberEpoch(member.previousMemberEpoch())
                    .setState(member.state().value())
                    .setAssignedPartitions(toTopicPartitions(member.assignedPartitions()))
                    .setPartitionsPendingRevocation(toTopicPartitions(member.partitionsPendingRevocation())),
                (short) 0 // value schema version
            )
        );
    }
/**
* Creates a ConsumerGroupCurrentMemberAssignment tombstone.
*
* @param groupId The consumer group id.
* @param memberId The consumer group member id.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupCurrentAssignmentTombstoneRecord(
String groupId,
String memberId
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupCurrentMemberAssignmentKey()
.setGroupId(groupId)
.setMemberId(memberId)
);
}
/**
* Creates a ConsumerGroupRegularExpression record.
*
* @param groupId The consumer group id.
* @param regex The regular expression.
* @param resolvedRegularExpression The metadata associated with the regular expression.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupRegularExpressionRecord(
String groupId,
String regex,
ResolvedRegularExpression resolvedRegularExpression
) {
List<String> topics = new ArrayList<>(resolvedRegularExpression.topics());
Collections.sort(topics);
return CoordinatorRecord.record(
new ConsumerGroupRegularExpressionKey()
.setGroupId(groupId)
.setRegularExpression(regex),
new ApiMessageAndVersion(
new ConsumerGroupRegularExpressionValue()
.setTopics(topics)
.setVersion(resolvedRegularExpression.version())
.setTimestamp(resolvedRegularExpression.timestamp()),
(short) 0
)
);
}
/**
* Creates a ConsumerGroupRegularExpression tombstone.
*
* @param groupId The consumer group id.
* @param regex The regular expression.
* @return The record.
*/
public static CoordinatorRecord newConsumerGroupRegularExpressionTombstone(
String groupId,
String regex
) {
return CoordinatorRecord.tombstone(
new ConsumerGroupRegularExpressionKey()
.setGroupId(groupId)
.setRegularExpression(regex)
);
}
    /**
     * Creates a GroupMetadata record.
     *
     * @param group The classic group.
     * @param assignment The classic group assignment, keyed by member id.
     * @return The record.
     * @throws IllegalStateException if any member has no subscription metadata for the
     *                               group's selected protocol, or no entry in {@code assignment}.
     */
    public static CoordinatorRecord newGroupMetadataRecord(
        ClassicGroup group,
        Map<String, byte[]> assignment
    ) {
        List<GroupMetadataValue.MemberMetadata> members = new ArrayList<>(group.allMembers().size());
        group.allMembers().forEach(member -> {
            // Every member must carry subscription metadata for the group's chosen protocol.
            byte[] subscription = group.protocolName().map(member::metadata).orElse(null);
            if (subscription == null) {
                throw new IllegalStateException("Attempted to write non-empty group metadata with no defined protocol.");
            }
            // ... and must have an assignment computed for it.
            byte[] memberAssignment = assignment.get(member.memberId());
            if (memberAssignment == null) {
                throw new IllegalStateException("Attempted to write member " + member.memberId() +
                    " of group " + group.groupId() + " with no assignment.");
            }
            members.add(
                new GroupMetadataValue.MemberMetadata()
                    .setMemberId(member.memberId())
                    .setClientId(member.clientId())
                    .setClientHost(member.clientHost())
                    .setRebalanceTimeout(member.rebalanceTimeoutMs())
                    .setSessionTimeout(member.sessionTimeoutMs())
                    .setGroupInstanceId(member.groupInstanceId().orElse(null))
                    .setSubscription(subscription)
                    .setAssignment(memberAssignment)
            );
        });
        return CoordinatorRecord.record(
            new GroupMetadataKey()
                .setGroup(group.groupId()),
            new ApiMessageAndVersion(
                new GroupMetadataValue()
                    .setProtocol(group.protocolName().orElse(null))
                    .setProtocolType(group.protocolType().orElse(""))
                    .setGeneration(group.generationId())
                    .setLeader(group.leaderOrNull())
                    .setCurrentStateTimestamp(group.currentStateTimestampOrDefault())
                    .setMembers(members),
                GROUP_METADATA_VALUE_VERSION
            )
        );
    }
/**
* Creates a GroupMetadata tombstone.
*
* @param groupId The group id.
* @return The record.
*/
public static CoordinatorRecord newGroupMetadataTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new GroupMetadataKey()
.setGroup(groupId)
);
}
    /**
     * Creates an empty GroupMetadata record.
     *
     * @param group The classic group.
     * @return The record, with no protocol, no leader, generation 0 and no members.
     */
    public static CoordinatorRecord newEmptyGroupMetadataRecord(
        ClassicGroup group
    ) {
        return CoordinatorRecord.record(
            new GroupMetadataKey()
                .setGroup(group.groupId()),
            new ApiMessageAndVersion(
                // An "empty" group: null protocol/leader, empty protocol type,
                // generation 0 and an empty member list. Only the state timestamp
                // is taken from the live group.
                new GroupMetadataValue()
                    .setProtocol(null)
                    .setProtocolType("")
                    .setGeneration(0)
                    .setLeader(null)
                    .setCurrentStateTimestamp(group.currentStateTimestampOrDefault())
                    .setMembers(List.of()),
                GROUP_METADATA_VALUE_VERSION
            )
        );
    }
    /**
     * Creates an OffsetCommit record.
     *
     * @param groupId The group id.
     * @param topic The topic name.
     * @param partitionId The partition id.
     * @param offsetAndMetadata The offset and metadata.
     * @return The record.
     */
    public static CoordinatorRecord newOffsetCommitRecord(
        String groupId,
        String topic,
        int partitionId,
        OffsetAndMetadata offsetAndMetadata
    ) {
        // Pick the legacy v1 schema only when an explicit expire timestamp must be
        // persisted; otherwise use the latest version (see offsetCommitValueVersion).
        short version = offsetCommitValueVersion(offsetAndMetadata.expireTimestampMs.isPresent());
        return CoordinatorRecord.record(
            new OffsetCommitKey()
                .setGroup(groupId)
                .setTopic(topic)
                .setPartition(partitionId),
            new ApiMessageAndVersion(
                new OffsetCommitValue()
                    .setOffset(offsetAndMetadata.committedOffset)
                    .setLeaderEpoch(offsetAndMetadata.leaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
                    .setMetadata(offsetAndMetadata.metadata)
                    .setCommitTimestamp(offsetAndMetadata.commitTimestampMs)
                    // Version 1 has a non-empty expireTimestamp field
                    .setExpireTimestamp(offsetAndMetadata.expireTimestampMs.orElse(OffsetCommitRequest.DEFAULT_TIMESTAMP))
                    .setTopicId(offsetAndMetadata.topicId),
                version
            )
        );
    }
static short offsetCommitValueVersion(boolean expireTimestampMs) {
if (expireTimestampMs) {
return 1;
} else {
return 4;
}
}
/**
* Creates an OffsetCommit tombstone record.
*
* @param groupId The group id.
* @param topic The topic name.
* @param partitionId The partition id.
* @return The record.
*/
public static CoordinatorRecord newOffsetCommitTombstoneRecord(
String groupId,
String topic,
int partitionId
) {
return CoordinatorRecord.tombstone(
new OffsetCommitKey()
.setGroup(groupId)
.setTopic(topic)
.setPartition(partitionId)
);
}
/**
* Creates a ShareGroupMemberMetadata record.
*
* @param groupId The consumer group id.
* @param member The consumer group member.
* @return The record.
*/
public static CoordinatorRecord newShareGroupMemberSubscriptionRecord(
String groupId,
ShareGroupMember member
) {
List<String> topicNames = new ArrayList<>(member.subscribedTopicNames());
Collections.sort(topicNames);
return CoordinatorRecord.record(
new ShareGroupMemberMetadataKey()
.setGroupId(groupId)
.setMemberId(member.memberId()),
new ApiMessageAndVersion(
new ShareGroupMemberMetadataValue()
.setRackId(member.rackId())
.setClientId(member.clientId())
.setClientHost(member.clientHost())
.setSubscribedTopicNames(topicNames),
(short) 0
)
);
}
/**
* Creates a ShareGroupMemberMetadata tombstone.
*
* @param groupId The share group id.
* @param memberId The share group member id.
* @return The record.
*/
public static CoordinatorRecord newShareGroupMemberSubscriptionTombstoneRecord(
String groupId,
String memberId
) {
return CoordinatorRecord.tombstone(
new ShareGroupMemberMetadataKey()
.setGroupId(groupId)
.setMemberId(memberId)
);
}
/**
* Creates a ShareGroupMetadata record.
*
* @param groupId The group id.
* @param newGroupEpoch The group epoch.
* @param metadataHash The group metadata hash.
* @return The record.
*/
public static CoordinatorRecord newShareGroupEpochRecord(
String groupId,
int newGroupEpoch,
long metadataHash
) {
return CoordinatorRecord.record(
new ShareGroupMetadataKey()
.setGroupId(groupId),
new ApiMessageAndVersion(
new ShareGroupMetadataValue()
.setEpoch(newGroupEpoch)
.setMetadataHash(metadataHash),
(short) 0
)
);
}
/**
* Creates a ShareGroupMetadata tombstone.
*
* @param groupId The group id.
* @return The record.
*/
public static CoordinatorRecord newShareGroupEpochTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new ShareGroupMetadataKey()
.setGroupId(groupId)
);
}
/**
* Creates a ShareGroupTargetAssignmentMember record.
*
* @param groupId The group id.
* @param memberId The group member id.
* @param partitions The target partitions of the member.
* @return The record.
*/
public static CoordinatorRecord newShareGroupTargetAssignmentRecord(
String groupId,
String memberId,
Map<Uuid, Set<Integer>> partitions
) {
List<ShareGroupTargetAssignmentMemberValue.TopicPartition> topicPartitions =
new ArrayList<>(partitions.size());
for (Map.Entry<Uuid, Set<Integer>> entry : partitions.entrySet()) {
topicPartitions.add(
new ShareGroupTargetAssignmentMemberValue.TopicPartition()
.setTopicId(entry.getKey())
.setPartitions(new ArrayList<>(entry.getValue()))
);
}
return CoordinatorRecord.record(
new ShareGroupTargetAssignmentMemberKey()
.setGroupId(groupId)
.setMemberId(memberId),
new ApiMessageAndVersion(
new ShareGroupTargetAssignmentMemberValue()
.setTopicPartitions(topicPartitions),
(short) 0
)
);
}
/**
* Creates a ShareGroupTargetAssignmentMember tombstone.
*
* @param groupId The group id.
* @param memberId The group member id.
* @return The record.
*/
public static CoordinatorRecord newShareGroupTargetAssignmentTombstoneRecord(
String groupId,
String memberId
) {
return CoordinatorRecord.tombstone(
new ShareGroupTargetAssignmentMemberKey()
.setGroupId(groupId)
.setMemberId(memberId)
);
}
/**
* Creates a ShareGroupTargetAssignmentMetadata record.
*
* @param groupId The group id.
* @param assignmentEpoch The group epoch.
* @return The record.
*/
public static CoordinatorRecord newShareGroupTargetAssignmentEpochRecord(
String groupId,
int assignmentEpoch
) {
return CoordinatorRecord.record(
new ShareGroupTargetAssignmentMetadataKey()
.setGroupId(groupId),
new ApiMessageAndVersion(
new ShareGroupTargetAssignmentMetadataValue()
.setAssignmentEpoch(assignmentEpoch),
(short) 0
)
);
}
/**
* Creates a ShareGroupTargetAssignmentMetadata tombstone.
*
* @param groupId The group id.
* @return The record.
*/
public static CoordinatorRecord newShareGroupTargetAssignmentEpochTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new ShareGroupTargetAssignmentMetadataKey()
.setGroupId(groupId)
);
}
    /**
     * Creates a ShareGroupCurrentMemberAssignment record.
     *
     * @param groupId The group id.
     * @param member The group member whose current assignment is persisted.
     * @return The record.
     */
    public static CoordinatorRecord newShareGroupCurrentAssignmentRecord(
        String groupId,
        ShareGroupMember member
    ) {
        return CoordinatorRecord.record(
            // Key: the record is scoped to a single member of the share group.
            new ShareGroupCurrentMemberAssignmentKey()
                .setGroupId(groupId)
                .setMemberId(member.memberId()),
            new ApiMessageAndVersion(
                // Value: the member's epochs, state and assigned partitions. Unlike the
                // consumer-group variant, no pending-revocation set is written here.
                new ShareGroupCurrentMemberAssignmentValue()
                    .setMemberEpoch(member.memberEpoch())
                    .setPreviousMemberEpoch(member.previousMemberEpoch())
                    .setState(member.state().value())
                    .setAssignedPartitions(toShareGroupTopicPartitions(member.assignedPartitions())),
                (short) 0 // value schema version
            )
        );
    }
/**
* Creates a ConsumerGroupCurrentMemberAssignment tombstone.
*
* @param groupId The consumer group id.
* @param memberId The consumer group member id.
* @return The record.
*/
public static CoordinatorRecord newShareGroupCurrentAssignmentTombstoneRecord(
String groupId,
String memberId
) {
return CoordinatorRecord.tombstone(
new ShareGroupCurrentMemberAssignmentKey()
.setGroupId(groupId)
.setMemberId(memberId)
);
}
/**
* Creates a ShareGroupStatePartitionMetadata tombstone.
*
* @param groupId The share group id.
* @return The record.
*/
public static CoordinatorRecord newShareGroupStatePartitionMetadataTombstoneRecord(
String groupId
) {
return CoordinatorRecord.tombstone(
new ShareGroupStatePartitionMetadataKey()
.setGroupId(groupId)
);
}
/**
* Creates a ShareGroupStatePartitionMetadata record.
*
* @param groupId The share group id.
* @param initializedTopics Topics which have been initialized.
* @param deletingTopics Topics which are being deleted.
* @return The record.
*/
public static CoordinatorRecord newShareGroupStatePartitionMetadataRecord(
String groupId,
Map<Uuid, InitMapValue> initializingTopics,
Map<Uuid, InitMapValue> initializedTopics,
Map<Uuid, String> deletingTopics
) {
List<ShareGroupStatePartitionMetadataValue.TopicPartitionsInfo> initializingTopicPartitionInfo = initializingTopics.entrySet().stream()
.map(entry -> new ShareGroupStatePartitionMetadataValue.TopicPartitionsInfo()
.setTopicId(entry.getKey())
.setTopicName(entry.getValue().name())
.setPartitions(entry.getValue().partitions().stream().toList()))
.toList();
List<ShareGroupStatePartitionMetadataValue.TopicPartitionsInfo> initializedTopicPartitionInfo = initializedTopics.entrySet().stream()
.map(entry -> new ShareGroupStatePartitionMetadataValue.TopicPartitionsInfo()
.setTopicId(entry.getKey())
.setTopicName(entry.getValue().name())
.setPartitions(entry.getValue().partitions().stream().toList()))
.toList();
List<ShareGroupStatePartitionMetadataValue.TopicInfo> deletingTopicsInfo = deletingTopics.entrySet().stream()
.map(entry -> new ShareGroupStatePartitionMetadataValue.TopicInfo()
.setTopicId(entry.getKey())
.setTopicName(entry.getValue()))
.toList();
return CoordinatorRecord.record(
new ShareGroupStatePartitionMetadataKey()
.setGroupId(groupId),
new ApiMessageAndVersion(
new ShareGroupStatePartitionMetadataValue()
.setInitializingTopics(initializingTopicPartitionInfo)
.setInitializedTopics(initializedTopicPartitionInfo)
.setDeletingTopics(deletingTopicsInfo),
(short) 0
)
);
}
private static List<ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions> toTopicPartitions(
Map<Uuid, Set<Integer>> topicPartitions
) {
List<ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions> topics = new ArrayList<>(topicPartitions.size());
topicPartitions.forEach((topicId, partitions) ->
topics.add(new ConsumerGroupCurrentMemberAssignmentValue.TopicPartitions()
.setTopicId(topicId)
.setPartitions(new ArrayList<>(partitions)))
);
return topics;
}
private static List<ShareGroupCurrentMemberAssignmentValue.TopicPartitions> toShareGroupTopicPartitions(
Map<Uuid, Set<Integer>> topicPartitions
) {
List<ShareGroupCurrentMemberAssignmentValue.TopicPartitions> topics = new ArrayList<>(topicPartitions.size());
topicPartitions.forEach((topicId, partitions) ->
topics.add(new ShareGroupCurrentMemberAssignmentValue.TopicPartitions()
.setTopicId(topicId)
.setPartitions(new ArrayList<>(partitions)))
);
return topics;
}
}
| GroupCoordinatorRecordHelpers |
java | quarkusio__quarkus | integration-tests/micrometer-mp-metrics/src/main/java/io/quarkus/it/micrometer/mpmetrics/MessageResource.java | {
"start": 470,
"end": 1730
} | class ____ {
private final MeterRegistry registry;
private final Counter first;
private final Counter second;
public MessageResource(MeterRegistry registry,
@Metric(name = "first-counter") final Counter first,
@Metric(name = "second-counter") final Counter second) {
this.registry = registry;
this.first = Objects.requireNonNull(first);
this.second = Objects.requireNonNull(second);
}
@GET
public String message() {
first.inc();
second.inc();
return registry.getClass().getName();
}
@GET
@Path("fail")
public String fail() {
first.inc();
throw new NullPointerException("Failed on purpose");
}
@GET
@Path("item/{id}")
public String item(@PathParam("id") String id) {
second.inc();
return "return message with id " + id;
}
@GET
@Path("mpmetrics")
public String metrics() {
Collection<Meter> meters = Search.in(registry).name(s -> s.contains("mpmetrics")).meters();
meters.addAll(Search.in(registry).name(s -> s.endsWith("-counter")).meters());
return meters.stream().allMatch(x -> x.getId().getTag("scope") != null) ? "OK" : "FAIL";
}
}
| MessageResource |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ConfigureShadedAWSSocketFactory.java | {
"start": 1280,
"end": 1312
} | class ____ not link.
*/
public | will |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java | {
"start": 3289,
"end": 33395
} | class ____ implements JournalManager {
static final Logger LOG = LoggerFactory.getLogger(QuorumJournalManager.class);
// This config is not publicly exposed
public static final String QJM_RPC_MAX_TXNS_KEY =
"dfs.ha.tail-edits.qjm.rpc.max-txns";
public static final int QJM_RPC_MAX_TXNS_DEFAULT = 5000;
// Maximum number of transactions to fetch at a time when using the
// RPC edit fetch mechanism
private final int maxTxnsPerRpc;
// Whether or not in-progress tailing is enabled in the configuration
private final boolean inProgressTailingEnabled;
// Timeouts for which the QJM will wait for each of the following actions.
private final int startSegmentTimeoutMs;
private final int prepareRecoveryTimeoutMs;
private final int acceptRecoveryTimeoutMs;
private final int finalizeSegmentTimeoutMs;
private final int selectInputStreamsTimeoutMs;
private final int getJournalStateTimeoutMs;
private final int newEpochTimeoutMs;
private final int writeTxnsTimeoutMs;
// This timeout is used for calls that don't occur during normal operation
// e.g. format, upgrade operations and a few others. So we can use rather
// lengthy timeouts by default.
private final int timeoutMs;
private final Configuration conf;
private final URI uri;
private final NamespaceInfo nsInfo;
private final String nameServiceId;
private boolean isActiveWriter;
private final AsyncLoggerSet loggers;
private static final int OUTPUT_BUFFER_CAPACITY_DEFAULT = 512 * 1024;
private int outputBufferCapacity;
private final URLConnectionFactory connectionFactory;
/** Limit logging about input stream selection to every 5 seconds max. */
private static final long SELECT_INPUT_STREAM_LOG_INTERVAL_MS = 5000;
private final LogThrottlingHelper selectInputStreamLogHelper =
new LogThrottlingHelper(SELECT_INPUT_STREAM_LOG_INTERVAL_MS);
@VisibleForTesting
public QuorumJournalManager(Configuration conf,
URI uri,
NamespaceInfo nsInfo) throws IOException {
this(conf, uri, nsInfo, null, IPCLoggerChannel.FACTORY);
}
public QuorumJournalManager(Configuration conf,
URI uri, NamespaceInfo nsInfo, String nameServiceId) throws IOException {
this(conf, uri, nsInfo, nameServiceId, IPCLoggerChannel.FACTORY);
}
@VisibleForTesting
QuorumJournalManager(Configuration conf,
URI uri, NamespaceInfo nsInfo,
AsyncLogger.Factory loggerFactory) throws IOException {
this(conf, uri, nsInfo, null, loggerFactory);
}
QuorumJournalManager(Configuration conf,
URI uri, NamespaceInfo nsInfo, String nameServiceId,
AsyncLogger.Factory loggerFactory) throws IOException {
Preconditions.checkArgument(conf != null, "must be configured");
this.conf = conf;
this.uri = uri;
this.nsInfo = nsInfo;
this.nameServiceId = nameServiceId;
this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
this.maxTxnsPerRpc =
conf.getInt(QJM_RPC_MAX_TXNS_KEY, QJM_RPC_MAX_TXNS_DEFAULT);
Preconditions.checkArgument(maxTxnsPerRpc > 0,
"Must specify %s greater than 0!", QJM_RPC_MAX_TXNS_KEY);
this.inProgressTailingEnabled = conf.getBoolean(
DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT);
// Configure timeouts.
this.startSegmentTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT);
this.prepareRecoveryTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT);
this.acceptRecoveryTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT);
this.finalizeSegmentTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_DEFAULT);
this.selectInputStreamsTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_DEFAULT);
this.getJournalStateTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT);
this.newEpochTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT);
this.writeTxnsTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
this.timeoutMs = (int) conf.getTimeDuration(DFSConfigKeys
.DFS_QJM_OPERATIONS_TIMEOUT,
DFSConfigKeys.DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT, TimeUnit
.MILLISECONDS);
int connectTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_DEFAULT);
int readTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY,
DFSConfigKeys.DFS_QJOURNAL_HTTP_READ_TIMEOUT_DEFAULT);
this.connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(connectTimeoutMs, readTimeoutMs, conf);
setOutputBufferCapacity(OUTPUT_BUFFER_CAPACITY_DEFAULT);
}
protected List<AsyncLogger> createLoggers(
AsyncLogger.Factory factory) throws IOException {
return createLoggers(conf, uri, nsInfo, factory, nameServiceId);
}
static String parseJournalId(URI uri) {
String path = uri.getPath();
Preconditions.checkArgument(path != null && !path.isEmpty(),
"Bad URI '%s': must identify journal in path component",
uri);
String journalId = path.substring(1);
checkJournalId(journalId);
return journalId;
}
public static void checkJournalId(String jid) {
Preconditions.checkArgument(jid != null &&
!jid.isEmpty() &&
!jid.contains("/") &&
!jid.startsWith("."),
"bad journal id: " + jid);
}
/**
* Fence any previous writers, and obtain a unique epoch number
* for write-access to the journal nodes.
*
* @return the new, unique epoch number
*/
Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch()
throws IOException {
Preconditions.checkState(!loggers.isEpochEstablished(),
"epoch already created");
Map<AsyncLogger, GetJournalStateResponseProto> lastPromises =
loggers.waitForWriteQuorum(loggers.getJournalState(),
getJournalStateTimeoutMs, "getJournalState()");
long maxPromised = Long.MIN_VALUE;
for (GetJournalStateResponseProto resp : lastPromises.values()) {
maxPromised = Math.max(maxPromised, resp.getLastPromisedEpoch());
}
assert maxPromised >= 0;
long myEpoch = maxPromised + 1;
Map<AsyncLogger, NewEpochResponseProto> resps =
loggers.waitForWriteQuorum(loggers.newEpoch(nsInfo, myEpoch),
newEpochTimeoutMs, "newEpoch(" + myEpoch + ")");
loggers.setEpoch(myEpoch);
return resps;
}
@Override
public void format(NamespaceInfo nsInfo, boolean force) throws IOException {
QuorumCall<AsyncLogger, Void> call = loggers.format(nsInfo, force);
try {
call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
"format");
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for format() response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for format() response");
}
if (call.countExceptions() > 0) {
call.rethrowException("Could not format one or more JournalNodes");
}
}
@Override
public boolean hasSomeData() throws IOException {
QuorumCall<AsyncLogger, Boolean> call =
loggers.isFormatted();
try {
call.waitFor(loggers.size(), 0, 0, timeoutMs, "hasSomeData");
} catch (InterruptedException e) {
throw new IOException("Interrupted while determining if JNs have data");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for response from loggers");
}
if (call.countExceptions() > 0) {
call.rethrowException(
"Unable to check if JNs are ready for formatting");
}
// If any of the loggers returned with a non-empty manifest, then
// we should prompt for format.
for (Boolean hasData : call.getResults().values()) {
if (hasData) {
return true;
}
}
// Otherwise, none were formatted, we can safely format.
return false;
}
/**
* Run recovery/synchronization for a specific segment.
* Postconditions:
* <ul>
* <li>This segment will be finalized on a majority
* of nodes.</li>
* <li>All nodes which contain the finalized segment will
* agree on the length.</li>
* </ul>
*
* @param segmentTxId the starting txid of the segment
* @throws IOException
*/
private void recoverUnclosedSegment(long segmentTxId) throws IOException {
Preconditions.checkArgument(segmentTxId > 0);
LOG.info("Beginning recovery of unclosed segment starting at txid " +
segmentTxId);
// Step 1. Prepare recovery
QuorumCall<AsyncLogger,PrepareRecoveryResponseProto> prepare =
loggers.prepareRecovery(segmentTxId);
Map<AsyncLogger, PrepareRecoveryResponseProto> prepareResponses=
loggers.waitForWriteQuorum(prepare, prepareRecoveryTimeoutMs,
"prepareRecovery(" + segmentTxId + ")");
LOG.info("Recovery prepare phase complete. Responses:\n" +
QuorumCall.mapToString(prepareResponses));
// Determine the logger who either:
// a) Has already accepted a previous proposal that's higher than any
// other
//
// OR, if no such logger exists:
//
// b) Has the longest log starting at this transaction ID
// TODO: we should collect any "ties" and pass the URL for all of them
// when syncing, so we can tolerate failure during recovery better.
Entry<AsyncLogger, PrepareRecoveryResponseProto> bestEntry = Collections.max(
prepareResponses.entrySet(), SegmentRecoveryComparator.INSTANCE);
AsyncLogger bestLogger = bestEntry.getKey();
PrepareRecoveryResponseProto bestResponse = bestEntry.getValue();
// Log the above decision, check invariants.
if (bestResponse.hasAcceptedInEpoch()) {
LOG.info("Using already-accepted recovery for segment " +
"starting at txid " + segmentTxId + ": " +
bestEntry);
} else if (bestResponse.hasSegmentState()) {
LOG.info("Using longest log: " + bestEntry);
} else {
// None of the responses to prepareRecovery() had a segment at the given
// txid. This can happen for example in the following situation:
// - 3 JNs: JN1, JN2, JN3
// - writer starts segment 101 on JN1, then crashes before
// writing to JN2 and JN3
// - during newEpoch(), we saw the segment on JN1 and decide to
// recover segment 101
// - before prepare(), JN1 crashes, and we only talk to JN2 and JN3,
// neither of which has any entry for this log.
// In this case, it is allowed to do nothing for recovery, since the
// segment wasn't started on a quorum of nodes.
// Sanity check: we should only get here if none of the responses had
// a log. This should be a postcondition of the recovery comparator,
// but a bug in the comparator might cause us to get here.
for (PrepareRecoveryResponseProto resp : prepareResponses.values()) {
assert !resp.hasSegmentState() :
"One of the loggers had a response, but no best logger " +
"was found.";
}
LOG.info("None of the responders had a log to recover: " +
QuorumCall.mapToString(prepareResponses));
return;
}
SegmentStateProto logToSync = bestResponse.getSegmentState();
assert segmentTxId == logToSync.getStartTxId();
// Sanity check: none of the loggers should be aware of a higher
// txid than the txid we intend to truncate to
for (Map.Entry<AsyncLogger, PrepareRecoveryResponseProto> e :
prepareResponses.entrySet()) {
AsyncLogger logger = e.getKey();
PrepareRecoveryResponseProto resp = e.getValue();
if (resp.hasLastCommittedTxId() &&
resp.getLastCommittedTxId() > logToSync.getEndTxId()) {
throw new AssertionError("Decided to synchronize log to " + logToSync +
" but logger " + logger + " had seen txid " +
resp.getLastCommittedTxId() + " committed");
}
}
URL syncFromUrl = bestLogger.buildURLToFetchLogs(segmentTxId);
QuorumCall<AsyncLogger,Void> accept = loggers.acceptRecovery(logToSync, syncFromUrl);
loggers.waitForWriteQuorum(accept, acceptRecoveryTimeoutMs,
"acceptRecovery(" + TextFormat.shortDebugString(logToSync) + ")");
// If one of the loggers above missed the synchronization step above, but
// we send a finalize() here, that's OK. It validates the log before
// finalizing. Hence, even if it is not "in sync", it won't incorrectly
// finalize.
QuorumCall<AsyncLogger, Void> finalize =
loggers.finalizeLogSegment(logToSync.getStartTxId(), logToSync.getEndTxId());
loggers.waitForWriteQuorum(finalize, finalizeSegmentTimeoutMs,
String.format("finalizeLogSegment(%s-%s)",
logToSync.getStartTxId(),
logToSync.getEndTxId()));
}
static List<AsyncLogger> createLoggers(Configuration conf,
URI uri,
NamespaceInfo nsInfo,
AsyncLogger.Factory factory,
String nameServiceId)
throws IOException {
List<AsyncLogger> ret = Lists.newArrayList();
List<InetSocketAddress> addrs = Util.getAddressesList(uri, conf);
if (addrs.size() % 2 == 0) {
LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
"of Journal Nodes specified. This is not recommended!");
}
String jid = parseJournalId(uri);
for (InetSocketAddress addr : addrs) {
ret.add(factory.createLogger(conf, nsInfo, jid, nameServiceId, addr));
}
return ret;
}
@Override
public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
throws IOException {
Preconditions.checkState(isActiveWriter,
"must recover segments before starting a new one");
QuorumCall<AsyncLogger, Void> q = loggers.startLogSegment(txId,
layoutVersion);
loggers.waitForWriteQuorum(q, startSegmentTimeoutMs,
"startLogSegment(" + txId + ")");
return new QuorumOutputStream(loggers, txId, outputBufferCapacity,
writeTxnsTimeoutMs, layoutVersion);
}
@Override
public void finalizeLogSegment(long firstTxId, long lastTxId)
throws IOException {
QuorumCall<AsyncLogger,Void> q = loggers.finalizeLogSegment(
firstTxId, lastTxId);
loggers.waitForWriteQuorum(q, finalizeSegmentTimeoutMs,
String.format("finalizeLogSegment(%s-%s)", firstTxId, lastTxId));
}
@Override
public void setOutputBufferCapacity(int size) {
int ipcMaxDataLength = conf.getInt(
CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
if (size >= ipcMaxDataLength) {
throw new IllegalArgumentException("Attempted to use QJM output buffer "
+ "capacity (" + size + ") greater than the IPC max data length ("
+ CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH + " = "
+ ipcMaxDataLength + "). This will cause journals to reject edits.");
}
outputBufferCapacity = size;
}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException {
// This purges asynchronously -- there's no need to wait for a quorum
// here, because it's always OK to fail.
LOG.info("Purging remote journals older than txid " + minTxIdToKeep);
loggers.purgeLogsOlderThan(minTxIdToKeep);
}
@Override
public void recoverUnfinalizedSegments() throws IOException {
Preconditions.checkState(!isActiveWriter, "already active writer");
LOG.info("Starting recovery process for unclosed journal segments...");
Map<AsyncLogger, NewEpochResponseProto> resps = createNewUniqueEpoch();
LOG.info("Successfully started new epoch " + loggers.getEpoch());
if (LOG.isDebugEnabled()) {
LOG.debug("newEpoch({}) responses:\n{}", loggers.getEpoch(), QuorumCall.mapToString(resps));
}
long mostRecentSegmentTxId = Long.MIN_VALUE;
for (NewEpochResponseProto r : resps.values()) {
if (r.hasLastSegmentTxId()) {
mostRecentSegmentTxId = Math.max(mostRecentSegmentTxId,
r.getLastSegmentTxId());
}
}
// On a completely fresh system, none of the journals have any
// segments, so there's nothing to recover.
if (mostRecentSegmentTxId != Long.MIN_VALUE) {
recoverUnclosedSegment(mostRecentSegmentTxId);
}
isActiveWriter = true;
}
@Override
public void close() throws IOException {
loggers.close();
connectionFactory.destroy();
}
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxnId, boolean inProgressOk) throws IOException {
selectInputStreams(streams, fromTxnId, inProgressOk, false);
}
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxnId, boolean inProgressOk,
boolean onlyDurableTxns) throws IOException {
// Some calls will use inProgressOK to get in-progress edits even if
// the cache used for RPC calls is not enabled; fall back to using the
// streaming mechanism to serve such requests
if (inProgressOk && inProgressTailingEnabled) {
LOG.debug("Tailing edits starting from txn ID {} via RPC mechanism", fromTxnId);
try {
Collection<EditLogInputStream> rpcStreams = new ArrayList<>();
selectRpcInputStreams(rpcStreams, fromTxnId, onlyDurableTxns);
streams.addAll(rpcStreams);
return;
} catch (IOException ioe) {
LOG.warn("Encountered exception while tailing edits >= " + fromTxnId +
" via RPC; falling back to streaming.", ioe);
}
}
selectStreamingInputStreams(streams, fromTxnId, inProgressOk,
onlyDurableTxns);
}
/**
* Select input streams from the journals, specifically using the RPC
* mechanism optimized for low latency.
*
* @param streams The collection to store the return streams into.
* @param fromTxnId Select edits starting from this transaction ID
* @param onlyDurableTxns Iff true, only include transactions which have been
* committed to a quorum of the journals.
* @throws IOException Upon issues, including cache misses on the journals.
*/
private void selectRpcInputStreams(Collection<EditLogInputStream> streams,
long fromTxnId, boolean onlyDurableTxns) throws IOException {
QuorumCall<AsyncLogger, GetJournaledEditsResponseProto> q =
loggers.getJournaledEdits(fromTxnId, maxTxnsPerRpc);
Map<AsyncLogger, GetJournaledEditsResponseProto> responseMap =
loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
"selectRpcInputStreams");
assert responseMap.size() >= loggers.getMajoritySize() :
"Quorum call returned without a majority";
List<Integer> responseCounts = new ArrayList<>();
for (GetJournaledEditsResponseProto resp : responseMap.values()) {
responseCounts.add(resp.getTxnCount());
}
Collections.sort(responseCounts);
int highestTxnCount = responseCounts.get(responseCounts.size() - 1);
if (LOG.isDebugEnabled() || highestTxnCount < 0) {
StringBuilder msg = new StringBuilder("Requested edits starting from ");
msg.append(fromTxnId).append("; got ").append(responseMap.size())
.append(" responses: <");
for (Map.Entry<AsyncLogger, GetJournaledEditsResponseProto> ent :
responseMap.entrySet()) {
msg.append("[").append(ent.getKey()).append(", ")
.append(ent.getValue().getTxnCount()).append("],");
}
msg.append(">");
if (highestTxnCount < 0) {
throw new IOException("Did not get any valid JournaledEdits " +
"responses: " + msg);
} else {
LOG.debug(msg.toString());
}
}
// Cancel any outstanding calls to JN's.
q.cancelCalls();
int maxAllowedTxns = !onlyDurableTxns ? highestTxnCount :
responseCounts.get(responseCounts.size() - loggers.getMajoritySize());
if (maxAllowedTxns == 0) {
LOG.debug("No new edits available in logs; requested starting from ID {}",
fromTxnId);
return;
}
LogAction logAction = selectInputStreamLogHelper.record(fromTxnId);
if (logAction.shouldLog()) {
LOG.info("Selected loggers with >= " + maxAllowedTxns + " transactions " +
"starting from lowest txn ID " + logAction.getStats(0).getMin() +
LogThrottlingHelper.getLogSupressionMessage(logAction));
}
PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<>(
JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
for (GetJournaledEditsResponseProto resp : responseMap.values()) {
long endTxnId = fromTxnId - 1 +
Math.min(maxAllowedTxns, resp.getTxnCount());
allStreams.add(EditLogFileInputStream.fromByteString(
resp.getEditLog(), fromTxnId, endTxnId, true));
}
JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
/**
* Select input streams from the journals, specifically using the streaming
* mechanism optimized for resiliency / bulk load.
*/
private void selectStreamingInputStreams(
Collection<EditLogInputStream> streams, long fromTxnId,
boolean inProgressOk, boolean onlyDurableTxns) throws IOException {
QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
loggers.getEditLogManifest(fromTxnId, inProgressOk);
Map<AsyncLogger, RemoteEditLogManifest> resps =
loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
"selectStreamingInputStreams");
if (LOG.isDebugEnabled()) {
LOG.debug("selectStreamingInputStream manifests:\n {}",
Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
}
final PriorityQueue<EditLogInputStream> allStreams =
new PriorityQueue<EditLogInputStream>(64,
JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
AsyncLogger logger = e.getKey();
RemoteEditLogManifest manifest = e.getValue();
long committedTxnId = manifest.getCommittedTxnId();
for (RemoteEditLog remoteLog : manifest.getLogs()) {
URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
long endTxId = remoteLog.getEndTxId();
// If it's bounded by durable Txns, endTxId could not be larger
// than committedTxnId. This ensures the consistency.
// We don't do the following for finalized log segments, since all
// edits in those are guaranteed to be committed.
if (onlyDurableTxns && inProgressOk && remoteLog.isInProgress()) {
endTxId = Math.min(endTxId, committedTxnId);
if (endTxId < remoteLog.getStartTxId()) {
LOG.warn("Found endTxId (" + endTxId + ") that is less than " +
"the startTxId (" + remoteLog.getStartTxId() +
") - setting it to startTxId.");
endTxId = remoteLog.getStartTxId();
}
}
EditLogInputStream elis = EditLogFileInputStream.fromUrl(
connectionFactory, url, remoteLog.getStartTxId(),
endTxId, remoteLog.isInProgress());
allStreams.add(elis);
}
}
JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
@Override
public String toString() {
return "QJM to " + loggers;
}
@VisibleForTesting
AsyncLoggerSet getLoggerSetForTests() {
return loggers;
}
@Override
public void doPreUpgrade() throws IOException {
QuorumCall<AsyncLogger, Void> call = loggers.doPreUpgrade();
try {
call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
"doPreUpgrade");
if (call.countExceptions() > 0) {
call.rethrowException("Could not do pre-upgrade of one or more JournalNodes");
}
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for doPreUpgrade() response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for doPreUpgrade() response");
}
}
@Override
public void doUpgrade(Storage storage) throws IOException {
QuorumCall<AsyncLogger, Void> call = loggers.doUpgrade(storage);
try {
call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
"doUpgrade");
if (call.countExceptions() > 0) {
call.rethrowException("Could not perform upgrade of one or more JournalNodes");
}
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for doUpgrade() response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for doUpgrade() response");
}
}
@Override
public void doFinalize() throws IOException {
QuorumCall<AsyncLogger, Void> call = loggers.doFinalize();
try {
call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
"doFinalize");
if (call.countExceptions() > 0) {
call.rethrowException("Could not finalize one or more JournalNodes");
}
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for doFinalize() response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for doFinalize() response");
}
}
@Override
public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
int targetLayoutVersion) throws IOException {
QuorumCall<AsyncLogger, Boolean> call = loggers.canRollBack(storage,
prevStorage, targetLayoutVersion);
try {
call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
"lockSharedStorage");
if (call.countExceptions() > 0) {
call.rethrowException("Could not check if roll back possible for"
+ " one or more JournalNodes");
}
// Either they all return the same thing or this call fails, so we can
// just return the first result.
try {
DFSUtil.assertAllResultsEqual(call.getResults().values());
} catch (AssertionError ae) {
throw new IOException("Results differed for canRollBack", ae);
}
for (Boolean result : call.getResults().values()) {
return result;
}
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for lockSharedStorage() " +
"response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for lockSharedStorage() " +
"response");
}
throw new AssertionError("Unreachable code.");
}
@Override
public void doRollback() throws IOException {
QuorumCall<AsyncLogger, Void> call = loggers.doRollback();
try {
call.waitFor(loggers.size(), loggers.size(), 0, timeoutMs,
"doRollback");
if (call.countExceptions() > 0) {
call.rethrowException("Could not perform rollback of one or more JournalNodes");
}
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for doFinalize() response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for doFinalize() response");
}
}
@Override
public void discardSegments(long startTxId) throws IOException {
QuorumCall<AsyncLogger, Void> call = loggers.discardSegments(startTxId);
try {
call.waitFor(loggers.size(), loggers.size(), 0,
timeoutMs, "discardSegments");
if (call.countExceptions() > 0) {
call.rethrowException(
"Could not perform discardSegments of one or more JournalNodes");
}
} catch (InterruptedException e) {
throw new IOException(
"Interrupted waiting for discardSegments() response");
} catch (TimeoutException e) {
throw new IOException(
"Timed out waiting for discardSegments() response");
}
}
@Override
public long getJournalCTime() throws IOException {
QuorumCall<AsyncLogger, Long> call = loggers.getJournalCTime();
try {
call.waitFor(loggers.size(), loggers.size(), 0,
timeoutMs, "getJournalCTime");
if (call.countExceptions() > 0) {
call.rethrowException("Could not journal CTime for one "
+ "more JournalNodes");
}
// Either they all return the same thing or this call fails, so we can
// just return the first result.
try {
DFSUtil.assertAllResultsEqual(call.getResults().values());
} catch (AssertionError ae) {
throw new IOException("Results differed for getJournalCTime", ae);
}
for (Long result : call.getResults().values()) {
return result;
}
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for getJournalCTime() " +
"response");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting for getJournalCTime() " +
"response");
}
throw new AssertionError("Unreachable code.");
}
}
| QuorumJournalManager |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/StreamTaskTest.java | {
"start": 115082,
"end": 116350
} | class ____ implements KeyedStateHandle {
private static final long serialVersionUID = -2473861305282291582L;
private final transient CompletableFuture<Void> discardFuture = new CompletableFuture<>();
private final StateHandleID stateHandleId = StateHandleID.randomStateHandleId();
public CompletableFuture<Void> getDiscardFuture() {
return discardFuture;
}
@Override
public KeyGroupRange getKeyGroupRange() {
return KeyGroupRange.EMPTY_KEY_GROUP_RANGE;
}
@Override
public TestingKeyedStateHandle getIntersection(KeyGroupRange keyGroupRange) {
return this;
}
@Override
public StateHandleID getStateHandleId() {
return stateHandleId;
}
@Override
public void registerSharedStates(SharedStateRegistry stateRegistry, long checkpointID) {}
@Override
public void discardState() {
discardFuture.complete(null);
}
@Override
public long getStateSize() {
return 0L;
}
@Override
public long getCheckpointedSize() {
return getStateSize();
}
}
private static | TestingKeyedStateHandle |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java | {
"start": 7135,
"end": 7677
} | class ____ the interfaces to be checked.
* @param methodName Method name of the method we wish to call.
* @param parameterTypes The parameter type signatures.
* @return the accessible method or {@code null} if not found.
*/
private static Method getAccessibleMethodFromInterfaceNest(Class<?> cls, final String methodName, final Class<?>... parameterTypes) {
// Search up the superclass chain
for (; cls != null; cls = cls.getSuperclass()) {
// Check the implemented interfaces of the parent | for |
java | apache__flink | flink-libraries/flink-state-processing-api/src/main/java/org/apache/flink/state/api/input/splits/OperatorStateInputSplit.java | {
"start": 1324,
"end": 2493
} | class ____ implements PrioritizedOperatorSubtaskStateInputSplit {
private static final long serialVersionUID = -1892383531558135420L;
private final StateObjectCollection<OperatorStateHandle> managedOperatorState;
private final int splitNum;
public OperatorStateInputSplit(
StateObjectCollection<OperatorStateHandle> managedOperatorState, int splitNum) {
this.managedOperatorState = managedOperatorState;
this.splitNum = splitNum;
}
@Override
public int getSplitNumber() {
return splitNum;
}
@Nonnull
public StateObjectCollection<OperatorStateHandle> getPrioritizedManagedOperatorState() {
return this.managedOperatorState;
}
@Override
public PrioritizedOperatorSubtaskState getPrioritizedOperatorSubtaskState() {
final OperatorSubtaskState subtaskState =
OperatorSubtaskState.builder()
.setManagedOperatorState(managedOperatorState)
.build();
return new PrioritizedOperatorSubtaskState.Builder(subtaskState, Collections.emptyList())
.build();
}
}
| OperatorStateInputSplit |
java | apache__flink | flink-metrics/flink-metrics-dropwizard/src/main/java/org/apache/flink/dropwizard/metrics/FlinkGaugeWrapper.java | {
"start": 977,
"end": 1468
} | class ____<T> implements com.codahale.metrics.Gauge<T> {
private final Gauge<T> gauge;
public FlinkGaugeWrapper(Gauge<T> gauge) {
this.gauge = gauge;
}
@Override
public T getValue() {
return this.gauge.getValue();
}
public static <T> FlinkGaugeWrapper<T> fromGauge(Gauge<?> gauge) {
@SuppressWarnings("unchecked")
Gauge<T> typedGauge = (Gauge<T>) gauge;
return new FlinkGaugeWrapper<>(typedGauge);
}
}
| FlinkGaugeWrapper |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/processor/src/main/java/org/jboss/resteasy/reactive/server/processor/generation/multipart/MultipartFeature.java | {
"start": 281,
"end": 581
} | class ____ extends AbstractFeatureScanner {
@Override
public void integrateWithIndexer(ServerEndpointIndexer.Builder builder, IndexView index) {
builder.setMultipartReturnTypeIndexerExtension(new GeneratedHandlerMultipartReturnTypeIndexerExtension(classOutput));
}
}
| MultipartFeature |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/RLocalCachedMapRx.java | {
"start": 1082,
"end": 1765
} | interface ____<K, V> extends RMapRx<K, V> {
/**
* Clears local cache across all instances
*
* @return void
*/
Completable clearLocalCache();
/**
* Returns all keys stored in local cache
*
* @return keys
*/
Set<K> cachedKeySet();
/**
* Returns all values stored in local cache
*
* @return values
*/
Collection<V> cachedValues();
/**
* Returns all map entries stored in local cache
*
* @return entries
*/
Set<Map.Entry<K, V>> cachedEntrySet();
/**
* Returns state of local cache
*
* @return map
*/
Map<K, V> getCachedMap();
}
| RLocalCachedMapRx |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/ImportedConfigurationClassEnhancementTests.java | {
"start": 1383,
"end": 2063
} | class ____ {
@Test
void autowiredConfigClassIsEnhancedWhenImported() {
autowiredConfigClassIsEnhanced(ConfigThatDoesImport.class);
}
@Test
void autowiredConfigClassIsEnhancedWhenRegisteredViaConstructor() {
autowiredConfigClassIsEnhanced(ConfigThatDoesNotImport.class, ConfigToBeAutowired.class);
}
@SuppressWarnings("deprecation")
private void autowiredConfigClassIsEnhanced(Class<?>... configClasses) {
ConfigurableApplicationContext ctx = new AnnotationConfigApplicationContext(configClasses);
Config config = ctx.getBean(Config.class);
assertThat(ClassUtils.isCglibProxy(config.autowiredConfig)).as("autowired config | ImportedConfigurationClassEnhancementTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/PrimitiveArrayPassedToVarargsMethodTest.java | {
"start": 2666,
"end": 3268
} | class ____ {
public void intVarargsMethod(int... ints) {}
public void intArrayVarargsMethod(int[]... intArrays) {}
public void objectVarargsMethodWithMultipleParams(Object obj1, Object... objs) {}
public void doIt() {
int[] intArray = {1, 2, 3};
intVarargsMethod(intArray);
intArrayVarargsMethod(intArray);
objectVarargsMethodWithMultipleParams(new Object());
}
}\
""")
.doTest();
}
}
| PrimitiveArrayPassedToVarargsMethodNegativeCases |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/result/method/AbstractHandlerMethodMapping.java | {
"start": 22423,
"end": 22591
} | class ____ {
@SuppressWarnings("unused")
public void handle() {
throw new UnsupportedOperationException("Not implemented");
}
}
}
| PreFlightAmbiguousMatchHandler |
java | apache__hadoop | hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/FileName.java | {
"start": 2649,
"end": 6822
} | class ____ a hardcoded name
if (!getName().equals(name)) {
throw new RuntimeException("State name mismatch! Expected '"
+ getName() + "' but found '" + name + "'.");
}
}
}
public FileName(String fileName) {
this.fileName = fileName;
}
@Override
public String getValue() {
return fileName;
}
@Override
public String getAnonymizedValue(StatePool statePool,
Configuration conf) {
if (anonymizedFileName == null) {
anonymize(statePool, conf);
}
return anonymizedFileName;
}
private void anonymize(StatePool statePool, Configuration conf) {
FileNameState fState = (FileNameState) statePool.getState(getClass());
if (fState == null) {
fState = new FileNameState();
statePool.addState(getClass(), fState);
}
String[] files = StringUtils.split(fileName);
String[] anonymizedFileNames = new String[files.length];
int i = 0;
for (String f : files) {
anonymizedFileNames[i++] =
anonymize(statePool, conf, fState, f);
}
anonymizedFileName = StringUtils.arrayToString(anonymizedFileNames);
}
private static String anonymize(StatePool statePool, Configuration conf,
FileNameState fState, String fileName) {
String ret = null;
try {
URI uri = new URI(fileName);
// anonymize the path i.e without the authority & scheme
ret =
anonymizePath(uri.getPath(), fState.getDirectoryState(),
fState.getFileNameState());
// anonymize the authority and scheme
String authority = uri.getAuthority();
String scheme = uri.getScheme();
if (scheme != null) {
String anonymizedAuthority = "";
if (authority != null) {
// anonymize the authority
NodeName hostName = new NodeName(null, uri.getHost());
anonymizedAuthority = hostName.getAnonymizedValue(statePool, conf);
}
ret = scheme + "://" + anonymizedAuthority + ret;
}
} catch (URISyntaxException use) {
throw new RuntimeException (use);
}
return ret;
}
// Anonymize the file-path
private static String anonymizePath(String path, WordList dState,
WordList fState) {
StringBuilder buffer = new StringBuilder();
StringTokenizer tokenizer = new StringTokenizer(path, Path.SEPARATOR, true);
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (Path.SEPARATOR.equals(token)) {
buffer.append(token);
} else if (Path.CUR_DIR.equals(token)) {
buffer.append(token);
} else if (PREV_DIR.equals(token)) {
buffer.append(token);
} else if (tokenizer.hasMoreTokens()){
// this component is a directory
buffer.append(anonymize(token, dState));
} else {
// this component is a file
buffer.append(anonymize(token, fState));
}
}
return buffer.toString();
}
//TODO There is no caching for saving memory.
private static String anonymize(String data, WordList wordList) {
if (data == null) {
return null;
}
if (WordListAnonymizerUtility.needsAnonymization(data)) {
String suffix = "";
String coreData = data;
// check and extract suffix
if (WordListAnonymizerUtility.hasSuffix(data, KNOWN_SUFFIXES)) {
// check if the data ends with a known suffix
String[] split =
WordListAnonymizerUtility.extractSuffix(data, KNOWN_SUFFIXES);
suffix = split[1];
coreData = split[0];
}
// check if the data is known content
//TODO [Chunking] Do this for sub-strings of data
String anonymizedData = coreData;
if (!WordListAnonymizerUtility.isKnownData(coreData)) {
if (!wordList.contains(coreData)) {
wordList.add(coreData);
}
anonymizedData = wordList.getName() + wordList.indexOf(coreData);
}
return anonymizedData + suffix;
} else {
return data;
}
}
} | has |
java | elastic__elasticsearch | x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java | {
"start": 18847,
"end": 19843
} | class ____ extends MockRepository.Plugin {
public static final String TYPE = "custom-mock";
@Override
public Map<String, Repository.Factory> getRepositories(
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings,
RepositoriesMetrics repositoriesMetrics,
SnapshotMetrics snapshotMetrics
) {
return Collections.singletonMap(
TYPE,
(projectId, metadata) -> new CustomMockRepository(
projectId,
metadata,
env,
namedXContentRegistry,
clusterService,
bigArrays,
recoverySettings,
snapshotMetrics
)
);
}
}
public static | CustomMockRepositoryPlugin |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/ExecutionSlotAllocationContext.java | {
"start": 1364,
"end": 3137
} | interface ____ extends InputsLocationsRetriever, StateLocationRetriever {
/**
* Returns required resources for an execution vertex.
*
* @param executionVertexId id of the execution vertex
* @return required resources for the given execution vertex
*/
ResourceProfile getResourceProfile(ExecutionVertexID executionVertexId);
/**
* Returns prior allocation id for an execution vertex.
*
* @param executionVertexId id of the execution vertex
* @return prior allocation id for the given execution vertex if it exists; otherwise {@code
* Optional.empty()}
*/
Optional<AllocationID> findPriorAllocationId(ExecutionVertexID executionVertexId);
/**
* Returns the scheduling topology containing all execution vertices and edges.
*
* @return scheduling topology
*/
SchedulingTopology getSchedulingTopology();
/**
* Returns all slot sharing groups in the job.
*
* @return all slot sharing groups in the job
*/
Set<SlotSharingGroup> getLogicalSlotSharingGroups();
/**
* Returns all co-location groups in the job.
*
* @return all co-location groups in the job
*/
Set<CoLocationGroup> getCoLocationGroups();
/**
* Returns all reserved allocations. These allocations/slots were used to run certain vertices
* and reserving them can prevent other vertices to take these slots and thus help vertices to
* be deployed into their previous slots again after failover. It is needed if {@link
* org.apache.flink.configuration.StateRecoveryOptions#LOCAL_RECOVERY} is enabled.
*
* @return all reserved allocations
*/
Set<AllocationID> getReservedAllocations();
}
| ExecutionSlotAllocationContext |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/fix/BindySimpleKeyValuePairWithoutSectionMarshallDslTest.java | {
"start": 1674,
"end": 3516
} | class ____ {
private static final String URI_MOCK_RESULT = "mock:result";
private static final String URI_MOCK_ERROR = "mock:error";
private static final String URI_DIRECT_START = "direct:start";
private List<Map<String, Object>> models = new ArrayList<>();
@Produce(URI_DIRECT_START)
private ProducerTemplate template;
@EndpointInject(URI_MOCK_RESULT)
private MockEndpoint result;
@EndpointInject(URI_MOCK_ERROR)
private MockEndpoint error;
@Test
public void testMarshallWithoutSection() throws Exception {
template.sendBody(generateModel());
// We don't expect to have a message as an error will be raised
result.expectedMessageCount(0);
// Message has been delivered to the mock error
error.expectedMessageCount(1);
result.assertIsSatisfied();
error.assertIsSatisfied();
// and check that we have the caused exception stored
Exchange exch = error.getReceivedExchanges().get(0);
Exception cause = exch.getProperty(Exchange.EXCEPTION_CAUGHT, IllegalArgumentException.class);
assertNotNull(cause);
assertEquals("@Section and/or @KeyValuePairDataField have not been defined", cause.getMessage());
}
public List<Map<String, Object>> generateModel() {
Map<String, Object> modelObjects = new HashMap<>();
Order order = new Order();
order.setAccount("BE.CHM.001");
order.setClOrdId("CHM0001-01");
order.setIDSource("4");
order.setSecurityId("BE0001245678");
order.setSide("1");
order.setText("this is a camel - bindy test");
modelObjects.put(order.getClass().getName(), order);
models.add(modelObjects);
return models;
}
public static | BindySimpleKeyValuePairWithoutSectionMarshallDslTest |
java | apache__camel | core/camel-core-languages/src/main/java/org/apache/camel/language/simple/SimpleExpressionBuilder.java | {
"start": 42164,
"end": 42739
} | class ____
String text = exp.evaluate(exchange, String.class);
Class<?> type = classResolver.resolveClass(text);
if (type != null) {
return type;
}
int pos = text.lastIndexOf('.');
if (pos > 0) {
String before = text.substring(0, pos);
String after = text.substring(pos + 1);
type = classResolver.resolveClass(before);
if (type != null) {
// special for | type |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/allocator/ReserveSlotFunction.java | {
"start": 1140,
"end": 1538
} | interface ____ {
/**
* Reserves the slot identified by the given allocation ID for the given resource profile.
*
* @param allocationId identifies the slot
* @param resourceProfile resource profile the slot must be able to fulfill
* @return reserved slot
*/
PhysicalSlot reserveSlot(AllocationID allocationId, ResourceProfile resourceProfile);
}
| ReserveSlotFunction |
java | elastic__elasticsearch | modules/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java | {
"start": 1565,
"end": 26972
} | class ____ extends ESTestCase {
private Processor processor;
@Before
public void createStandardProcessor() {
processor = new AttachmentProcessor(
randomAlphaOfLength(10),
null,
"source_field",
"target_field",
EnumSet.allOf(AttachmentProcessor.Property.class),
10000,
false,
null,
null,
false
);
}
public void testEnglishTextDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("text-in-english.txt", processor);
assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
assertThat(attachmentData.get("language"), is("en"));
assertThat(attachmentData.get("content"), is("\"God Save the Queen\" (alternatively \"God Save the King\""));
assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
}
public void testHtmlDocumentWithRandomFields() throws Exception {
// some metadata are not present in the html doc
// "date", "metadata_date", "comments", "modified", "modifier", "print_date", "relation", "creator_tool", "altitude"
// "identifier", "longitude", "publisher", "description", "latitude", "format", "source", "coverage"
// "rating", "type", "contributor", "rights"
// we are only trying with content, title, author, keywords, content_type and content_length.
ArrayList<AttachmentProcessor.Property> fieldsList = new ArrayList<>(
EnumSet.of(
AttachmentProcessor.Property.CONTENT,
AttachmentProcessor.Property.TITLE,
AttachmentProcessor.Property.AUTHOR,
AttachmentProcessor.Property.KEYWORDS,
AttachmentProcessor.Property.CONTENT_TYPE,
AttachmentProcessor.Property.CONTENT_LENGTH
)
);
Set<AttachmentProcessor.Property> selectedProperties = new HashSet<>();
int numFields = randomIntBetween(1, fieldsList.size());
String[] selectedFieldNames = new String[numFields];
for (int i = 0; i < numFields; i++) {
AttachmentProcessor.Property property;
do {
property = randomFrom(fieldsList);
} while (selectedProperties.add(property) == false);
selectedFieldNames[i] = property.toLowerCase();
}
if (randomBoolean()) {
selectedProperties.add(AttachmentProcessor.Property.DATE);
}
processor = new AttachmentProcessor(
randomAlphaOfLength(10),
null,
"source_field",
"target_field",
selectedProperties,
10000,
false,
null,
null,
false
);
Map<String, Object> attachmentData = parseDocument("htmlWithEmptyDateMeta.html", processor);
assertThat(attachmentData.keySet(), hasSize(selectedFieldNames.length));
assertThat(attachmentData.keySet(), containsInAnyOrder(selectedFieldNames));
}
public void testFrenchTextDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("text-in-french.txt", processor);
assertThat(attachmentData.keySet(), hasItem("language"));
assertThat(attachmentData.get("language"), is("fr"));
}
public void testUnknownLanguageDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("text-gibberish.txt", processor);
assertThat(attachmentData.keySet(), hasItem("language"));
// lt seems some standard for not detected
assertThat(attachmentData.get("language"), is("lt"));
}
public void testEmptyTextDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("text-empty.txt", processor);
assertThat(attachmentData.keySet(), not(hasItem("language")));
}
public void testWordDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("issue-104.docx", processor);
assertThat(
attachmentData.keySet(),
containsInAnyOrder(
"content",
"language",
"date",
"author",
"content_type",
"content_length",
"modifier",
"modified",
"publisher"
)
);
assertThat(attachmentData.get("content"), is(notNullValue()));
assertThat(attachmentData.get("language"), is("en"));
assertThat(attachmentData.get("date"), is("2012-10-12T11:17:00Z"));
assertThat(attachmentData.get("author"), is("Windows User"));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
assertThat(
attachmentData.get("content_type").toString(),
is("application/vnd.openxmlformats-officedocument.wordprocessingml.document")
);
assertThat(attachmentData.get("modifier").toString(), is("Luka Lampret"));
assertThat(attachmentData.get("modified").toString(), is("2015-02-20T11:36:00Z"));
assertThat(attachmentData.get("publisher").toString(), is("JDI"));
}
public void testWordDocumentWithVisioSchema() throws Exception {
Map<String, Object> attachmentData = parseDocument("issue-22077.docx", processor);
assertThat(
attachmentData.keySet(),
containsInAnyOrder(
"content",
"language",
"date",
"author",
"content_type",
"content_length",
"modifier",
"modified",
"print_date"
)
);
assertThat(attachmentData.get("content").toString(), containsString("Table of Contents"));
assertThat(attachmentData.get("language"), is("en"));
assertThat(attachmentData.get("date"), is("2015-01-06T18:07:00Z"));
assertThat(attachmentData.get("author"), is(notNullValue()));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
assertThat(
attachmentData.get("content_type").toString(),
is("application/vnd.openxmlformats-officedocument.wordprocessingml.document")
);
assertThat(attachmentData.get("modifier").toString(), is("Chris Dufour"));
assertThat(attachmentData.get("modified").toString(), is("2016-12-04T16:58:00Z"));
assertThat(attachmentData.get("print_date").toString(), is("2015-01-05T19:12:00Z"));
}
public void testLegacyWordDocumentWithVisioSchema() throws Exception {
Map<String, Object> attachmentData = parseDocument("issue-22077.doc", processor);
assertThat(
attachmentData.keySet(),
containsInAnyOrder(
"content",
"language",
"date",
"author",
"content_type",
"content_length",
"modifier",
"modified",
"print_date"
)
);
assertThat(attachmentData.get("content").toString(), containsString("Table of Contents"));
assertThat(attachmentData.get("language"), is("en"));
assertThat(attachmentData.get("date"), is("2016-12-16T15:04:00Z"));
assertThat(attachmentData.get("author"), is(notNullValue()));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
assertThat(attachmentData.get("content_type").toString(), is("application/msword"));
assertThat(attachmentData.get("modifier").toString(), is("David Pilato"));
assertThat(attachmentData.get("modified").toString(), is("2016-12-16T15:04:00Z"));
assertThat(attachmentData.get("print_date").toString(), is("2015-01-05T19:12:00Z"));
}
public void testPdf() throws Exception {
Map<String, Object> attachmentData = parseDocument("test.pdf", processor);
assertThat(
attachmentData.get("content"),
is("This is a test, with umlauts, from München\n\nAlso contains newlines for testing.\n\nAnd one more.")
);
assertThat(attachmentData.get("content_type").toString(), is("application/pdf"));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
}
public void testVisioIsExcluded() throws Exception {
Map<String, Object> attachmentData = parseDocument("issue-22077.vsdx", processor);
assertThat(attachmentData.get("content"), nullValue());
assertThat(attachmentData.get("content_type"), is("application/vnd.ms-visio.drawing"));
assertThat(attachmentData.get("content_length"), is(0L));
}
public void testEncryptedWithPasswordPdf() throws Exception {
/*
* This tests that a PDF that has been encrypted with a password fails in the way expected
*/
ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> parseDocument("encrypted.pdf", processor));
assertThat(e.getDetailedMessage(), containsString("document is encrypted"));
}
public void testEncryptedWithKeyPdf() throws Exception {
/*
* This tests that a PDF that has been encrypted with a public key fails in the way expected
*/
ElasticsearchParseException e = expectThrows(
ElasticsearchParseException.class,
() -> parseDocument("encrypted-with-key.pdf", processor)
);
assertThat(e.getDetailedMessage(), containsString("document is encrypted"));
}
public void testHtmlDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("htmlWithEmptyDateMeta.html", processor);
assertThat(
attachmentData.keySet(),
containsInAnyOrder("language", "content", "author", "keywords", "title", "content_type", "content_length")
);
assertThat(attachmentData.get("language"), is("en"));
assertThat(attachmentData.get("content"), is(notNullValue()));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
assertThat(attachmentData.get("author"), is("kimchy"));
assertThat(attachmentData.get("keywords"), is("elasticsearch,cool,bonsai"));
assertThat(attachmentData.get("title"), is("Hello"));
assertThat(attachmentData.get("content_type").toString(), containsString("text/html"));
}
public void testXHtmlDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("testXHTML.html", processor);
assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "author", "title", "content_type", "content_length"));
assertThat(attachmentData.get("content_type").toString(), containsString("application/xhtml+xml"));
}
    // Parses an EPUB sample and verifies the EPUB-specific metadata fields
    // (identifier, contributor, publisher, description) are extracted alongside the common ones.
    public void testEpubDocument() throws Exception {
        Map<String, Object> attachmentData = parseDocument("testEPUB.epub", processor);
        // exactly this set of fields must be produced, nothing more
        assertThat(
            attachmentData.keySet(),
            containsInAnyOrder(
                "language",
                "content",
                "author",
                "title",
                "content_type",
                "content_length",
                "date",
                "keywords",
                "identifier",
                "contributor",
                "publisher",
                "description"
            )
        );
        assertThat(attachmentData.get("content_type").toString(), containsString("application/epub+zip"));
        assertThat(attachmentData.get("identifier").toString(), is("1234567890"));
        assertThat(attachmentData.get("contributor").toString(), is("no-one"));
        assertThat(attachmentData.get("publisher").toString(), is("Apache"));
        assertThat(attachmentData.get("description").toString(), is("This is an ePub test publication for Tika."));
    }
// no real detection, just rudimentary
public void testAsciidocDocument() throws Exception {
Map<String, Object> attachmentData = parseDocument("asciidoc.asciidoc", processor);
assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content_type", "content", "content_length"));
assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
}
    // See (https://issues.apache.org/jira/browse/COMPRESS-432) for information
    // about the issue that causes a zip file to hang in Tika versions prior to 1.18.
    // The test passes as long as parsing terminates; there is deliberately no assertion on the result.
    public void testZipFileDoesNotHang() throws Exception {
        parseDocument("bad_tika.zip", processor);
    }
public void testParseAsBytesArray() throws Exception {
String path = "/org/elasticsearch/ingest/attachment/test/sample-files/text-in-english.txt";
byte[] bytes;
try (InputStream is = AttachmentProcessorTests.class.getResourceAsStream(path)) {
bytes = IOUtils.toByteArray(is);
}
Map<String, Object> document = new HashMap<>();
document.put("source_field", bytes);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
@SuppressWarnings("unchecked")
Map<String, Object> attachmentData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
assertThat(attachmentData.get("language"), is("en"));
assertThat(attachmentData.get("content"), is("\"God Save the Queen\" (alternatively \"God Save the King\""));
assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
assertThat(attachmentData.get("content_length"), is(notNullValue()));
}
public void testNullValueWithIgnoreMissing() throws Exception {
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(
random(),
Collections.singletonMap("source_field", null)
);
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor = new AttachmentProcessor(
randomAlphaOfLength(10),
null,
"source_field",
"randomTarget",
null,
10,
true,
null,
null,
false
);
processor.execute(ingestDocument);
assertIngestDocument(originalIngestDocument, ingestDocument);
}
public void testNonExistentWithIgnoreMissing() throws Exception {
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap());
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor = new AttachmentProcessor(
randomAlphaOfLength(10),
null,
"source_field",
"randomTarget",
null,
10,
true,
null,
null,
false
);
processor.execute(ingestDocument);
assertIngestDocument(originalIngestDocument, ingestDocument);
}
public void testNullWithoutIgnoreMissing() throws Exception {
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(
random(),
Collections.singletonMap("source_field", null)
);
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor = new AttachmentProcessor(
randomAlphaOfLength(10),
null,
"source_field",
"randomTarget",
null,
10,
false,
null,
null,
false
);
Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
assertThat(exception.getMessage(), equalTo("field [source_field] is null, cannot parse."));
}
public void testNonExistentWithoutIgnoreMissing() throws Exception {
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap());
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor = new AttachmentProcessor(
randomAlphaOfLength(10),
null,
"source_field",
"randomTarget",
null,
10,
false,
null,
null,
false
);
Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
assertThat(exception.getMessage(), equalTo("field [source_field] not present as part of path [source_field]"));
}
    /**
     * Parses the given sample file with the supplied processor, with no optional fields.
     */
    private Map<String, Object> parseDocument(String file, Processor attachmentProcessor) throws Exception {
        return parseDocument(file, attachmentProcessor, new HashMap<>());
    }
    /**
     * Parses the given sample file with the supplied processor, without exposing the resource name.
     */
    private Map<String, Object> parseDocument(String file, Processor attachmentProcessor, Map<String, Object> optionalFields)
        throws Exception {
        return parseDocument(file, attachmentProcessor, optionalFields, false);
    }
    /**
     * Loads a sample file, wraps it in a randomly generated ingest document, runs the given
     * processor on it and returns the attachment map written to {@code target_field}.
     *
     * @param file                sample file name, resolved under the test sample-files resources
     * @param attachmentProcessor the processor under test
     * @param optionalFields      extra source fields; applied last, so they can override
     *                            {@code source_field} and {@code resource_name}
     * @param includeResourceName whether to also store the file name under {@code resource_name}
     */
    private Map<String, Object> parseDocument(
        String file,
        Processor attachmentProcessor,
        Map<String, Object> optionalFields,
        boolean includeResourceName
    ) throws Exception {
        Map<String, Object> document = new HashMap<>();
        document.put("source_field", getAsBinaryOrBase64(file));
        if (includeResourceName) {
            document.put("resource_name", file);
        }
        document.putAll(optionalFields);
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        attachmentProcessor.execute(ingestDocument);
        @SuppressWarnings("unchecked")
        Map<String, Object> attachmentData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
        return attachmentData;
    }
    // Verifies the indexed-chars limit, both as a fixed processor setting and via a
    // per-document override field ("max_length").
    public void testIndexedChars() throws Exception {
        // processor limited to extracting 19 characters
        processor = new AttachmentProcessor(
            randomAlphaOfLength(10),
            null,
            "source_field",
            "target_field",
            EnumSet.allOf(AttachmentProcessor.Property.class),
            19,
            false,
            null,
            null,
            false
        );
        Map<String, Object> attachmentData = parseDocument("text-in-english.txt", processor);
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("language"), is("en"));
        // content is truncated to the first 19 characters
        assertThat(attachmentData.get("content"), is("\"God Save the Queen"));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_length"), is(19L));
        // same limit, but now also reading a per-document "max_length" override field
        processor = new AttachmentProcessor(
            randomAlphaOfLength(10),
            null,
            "source_field",
            "target_field",
            EnumSet.allOf(AttachmentProcessor.Property.class),
            19,
            false,
            "max_length",
            null,
            false
        );
        // no "max_length" field in the document: behaves like the plain 19-char limit
        attachmentData = parseDocument("text-in-english.txt", processor);
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("language"), is("en"));
        assertThat(attachmentData.get("content"), is("\"God Save the Queen"));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_length"), is(19L));
        // "max_length" = 10 overrides the processor setting; the 10-char snippet is too short
        // for reliable language detection (the detector happens to report "sk")
        attachmentData = parseDocument("text-in-english.txt", processor, Collections.singletonMap("max_length", 10));
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("language"), is("sk"));
        assertThat(attachmentData.get("content"), is("\"God Save"));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_length"), is(10L));
        // "max_length" = 100 exceeds the full text, so all 56 characters are kept
        attachmentData = parseDocument("text-in-english.txt", processor, Collections.singletonMap("max_length", 100));
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("language"), is("en"));
        assertThat(attachmentData.get("content"), is("\"God Save the Queen\" (alternatively \"God Save the King\""));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_length"), is(56L));
    }
    // Verifies the indexed-chars limit for CJK plain-text files when the file name is passed
    // along via the "resource_name" field; presumably the resource name feeds charset/type
    // detection, since each file's charset (Big5 / GB18030 / EUC-JP) is asserted below.
    public void testIndexedCharsWithResourceName() throws Exception {
        processor = new AttachmentProcessor(
            randomAlphaOfLength(10),
            null,
            "source_field",
            "target_field",
            EnumSet.allOf(AttachmentProcessor.Property.class),
            100,
            false,
            null,
            "resource_name",
            false
        );
        // Traditional Chinese, Big5-encoded
        Map<String, Object> attachmentData = parseDocument(
            "text-cjk-big5.txt",
            processor,
            Collections.singletonMap("max_length", 100),
            true
        );
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("content").toString(), containsString("碩鼠碩鼠,無食我黍!"));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_type").toString(), containsString("charset=Big5"));
        assertThat(attachmentData.get("content_length"), is(100L));
        // Simplified Chinese, GBK/GB18030-encoded
        attachmentData = parseDocument("text-cjk-gbk.txt", processor, Collections.singletonMap("max_length", 100), true);
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("content").toString(), containsString("硕鼠硕鼠,无食我黍!"));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_type").toString(), containsString("charset=GB18030"));
        assertThat(attachmentData.get("content_length"), is(100L));
        // Japanese, EUC-JP-encoded
        attachmentData = parseDocument("text-cjk-euc-jp.txt", processor, Collections.singletonMap("max_length", 100), true);
        assertThat(attachmentData.keySet(), containsInAnyOrder("language", "content", "content_type", "content_length"));
        assertThat(attachmentData.get("content").toString(), containsString("碩鼠よ碩鼠よ、" + System.lineSeparator() + "我が黍を食らう無かれ!"));
        assertThat(attachmentData.get("content_type").toString(), containsString("text/plain"));
        assertThat(attachmentData.get("content_type").toString(), containsString("charset=EUC-JP"));
        assertThat(attachmentData.get("content_length"), is(100L));
    }
    // Verifies the remove-binary option (the final constructor boolean): by default the binary
    // source field is kept after processing, when enabled it is removed.
    public void testRemoveBinary() throws Exception {
        {
            // Test the default behavior: the binary source field is left in place.
            Map<String, Object> document = new HashMap<>();
            document.put("source_field", getAsBinaryOrBase64("text-in-english.txt"));
            IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
            processor.execute(ingestDocument);
            assertThat(ingestDocument.hasField("source_field"), is(true));
        }
        {
            // Remove the binary field: same run with the removal flag set to true.
            processor = new AttachmentProcessor(
                randomAlphaOfLength(10),
                null,
                "source_field",
                "target_field",
                EnumSet.allOf(AttachmentProcessor.Property.class),
                10000,
                false,
                null,
                null,
                true
            );
            Map<String, Object> document = new HashMap<>();
            document.put("source_field", getAsBinaryOrBase64("text-in-english.txt"));
            IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
            processor.execute(ingestDocument);
            assertThat(ingestDocument.hasField("source_field"), is(false));
        }
    }
private Object getAsBinaryOrBase64(String filename) throws Exception {
String path = "/org/elasticsearch/ingest/attachment/test/sample-files/" + filename;
try (InputStream is = AttachmentProcessorTests.class.getResourceAsStream(path)) {
byte bytes[] = IOUtils.toByteArray(is);
// behave like CBOR from time to time
if (rarely()) {
return bytes;
} else {
return Base64.getEncoder().encodeToString(bytes);
}
}
}
}
| AttachmentProcessorTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/PropertySetterAccessException.java | {
"start": 389,
"end": 619
} | class ____ extends PropertyAccessException {
/**
* Constructs a {@code PropertyAccessException} using the specified information.
*
* @param cause The underlying cause
* @param persistentClass The | PropertySetterAccessException |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/protocol/body/ConsumeByWho.java | {
"start": 956,
"end": 2047
} | class ____ extends RemotingSerializable {
private HashSet<String> consumedGroup = new HashSet<>();
private HashSet<String> notConsumedGroup = new HashSet<>();
private String topic;
private int queueId;
private long offset;
public HashSet<String> getConsumedGroup() {
return consumedGroup;
}
public void setConsumedGroup(HashSet<String> consumedGroup) {
this.consumedGroup = consumedGroup;
}
public HashSet<String> getNotConsumedGroup() {
return notConsumedGroup;
}
public void setNotConsumedGroup(HashSet<String> notConsumedGroup) {
this.notConsumedGroup = notConsumedGroup;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public int getQueueId() {
return queueId;
}
public void setQueueId(int queueId) {
this.queueId = queueId;
}
public long getOffset() {
return offset;
}
public void setOffset(long offset) {
this.offset = offset;
}
}
| ConsumeByWho |
java | quarkusio__quarkus | integration-tests/redis-devservices/src/test/java/io/quarkus/redis/devservices/it/DevServicesRedisCustomPortReusableServiceTest.java | {
"start": 463,
"end": 873
} | class ____ {
@Test
@DisplayName("should start redis container with the given custom port")
public void shouldStartRedisContainer() {
// We could strengthen this test to make sure the container is the same as seen by other tests, but it's hard since we won't know the order
Assertions.assertTrue(SocketKit.isPortAlreadyUsed(6371));
}
}
| DevServicesRedisCustomPortReusableServiceTest |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/NettyServerCustomizer.java | {
"start": 5076,
"end": 6353
} | enum ____ {
/**
* The channel is a "listener" channel, e.g. a
* {@link io.netty.channel.socket.ServerSocketChannel} representing a TCP listener the
* server is bound to.
*/
LISTENER,
/**
* The channel is a connection channel, e.g. a
* {@link io.netty.channel.socket.SocketChannel}, representing an HTTP connection.
*/
CONNECTION,
/**
* The channel is a channel representing an individual HTTP2 stream.
* <p>
* Note: As of 4.5.0, there is no separate channel for each request anymore for performance
* reasons. You can revert to the old behavior using the
* {@code micronaut.server.netty.legacy-multiplex-handlers=true} configuration property.
*/
REQUEST_STREAM,
/**
* The channel is a channel representing an individual HTTP2 stream, created for a push promise.
* <p>
* Note: As of 4.5.0, there is no separate channel for each request anymore for performance
* reasons. You can revert to the old behavior using the
* {@code micronaut.server.netty.legacy-multiplex-handlers=true} configuration property.
*/
PUSH_PROMISE_STREAM,
}
}
| ChannelRole |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/signatures/MetadataInjectionTest.java | {
"start": 1887,
"end": 2143
} | class ____ {
private final int count;
public CounterMetadata(int count) {
this.count = count;
}
int getCount() {
return count;
}
}
@ApplicationScoped
public static | CounterMetadata |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/remote/request/InternalRequest.java | {
"start": 965,
"end": 1096
} | class ____ extends Request {
@Override
public String getModule() {
return INTERNAL_MODULE;
}
}
| InternalRequest |
java | elastic__elasticsearch | x-pack/plugin/deprecation/qa/early-deprecation-plugin/src/main/java/org/elasticsearch/xpack/deprecation/earlyplugin/EarlyDeprecationTestPlugin.java | {
"start": 629,
"end": 1006
} | class ____ extends Plugin implements ClusterPlugin {
private DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(EarlyDeprecationTestPlugin.class);
@Override
public void onNodeStarted() {
deprecationLogger.warn(DeprecationCategory.API, "early_deprecation", "Early deprecation emitted after node is started up");
}
}
| EarlyDeprecationTestPlugin |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java | {
"start": 8199,
"end": 32745
} | class ____ implements SimpleDiffable<DataStreamInfo>, ToXContentObject {
public static final ParseField STATUS_FIELD = new ParseField("status");
public static final ParseField INDEX_TEMPLATE_FIELD = new ParseField("template");
public static final ParseField SETTINGS_FIELD = new ParseField("settings");
public static final ParseField MAPPINGS_FIELD = new ParseField("mappings");
public static final ParseField PREFER_ILM = new ParseField("prefer_ilm");
public static final ParseField MANAGED_BY = new ParseField("managed_by");
public static final ParseField NEXT_GENERATION_INDEX_MANAGED_BY = new ParseField("next_generation_managed_by");
public static final ParseField ILM_POLICY_FIELD = new ParseField("ilm_policy");
public static final ParseField LIFECYCLE_FIELD = new ParseField("lifecycle");
public static final ParseField HIDDEN_FIELD = new ParseField("hidden");
public static final ParseField SYSTEM_FIELD = new ParseField("system");
public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing");
public static final ParseField REPLICATED = new ParseField("replicated");
public static final ParseField ROLLOVER_ON_WRITE = new ParseField("rollover_on_write");
public static final ParseField TIME_SERIES = new ParseField("time_series");
public static final ParseField TEMPORAL_RANGES = new ParseField("temporal_ranges");
public static final ParseField TEMPORAL_RANGE_START = new ParseField("start");
public static final ParseField TEMPORAL_RANGE_END = new ParseField("end");
public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT = new ParseField("time_since_last_auto_shard_event");
public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS = new ParseField(
"time_since_last_auto_shard_event_millis"
);
public static final ParseField FAILURE_STORE_ENABLED = new ParseField("enabled");
public static final ParseField MAXIMUM_TIMESTAMP = new ParseField("maximum_timestamp");
public static final ParseField INDEX_MODE = new ParseField("index_mode");
private final DataStream dataStream;
private final ClusterHealthStatus dataStreamStatus;
private final boolean failureStoreEffectivelyEnabled; // Must be serialized independently of dataStream as depends on settings
@Nullable
private final String indexTemplate;
@Nullable
private final String ilmPolicyName;
@Nullable
private final TimeSeries timeSeries;
private final Map<Index, IndexProperties> indexSettingsValues;
private final boolean templatePreferIlmValue;
@Nullable
private final Long maximumTimestamp;
@Nullable
private final String indexMode;
public DataStreamInfo(
DataStream dataStream,
boolean failureStoreEffectivelyEnabled,
ClusterHealthStatus dataStreamStatus,
@Nullable String indexTemplate,
@Nullable String ilmPolicyName,
@Nullable TimeSeries timeSeries,
Map<Index, IndexProperties> indexSettingsValues,
boolean templatePreferIlmValue,
@Nullable Long maximumTimestamp,
@Nullable String indexMode
) {
this.dataStream = dataStream;
this.failureStoreEffectivelyEnabled = failureStoreEffectivelyEnabled;
this.dataStreamStatus = dataStreamStatus;
this.indexTemplate = indexTemplate;
this.ilmPolicyName = ilmPolicyName;
this.timeSeries = timeSeries;
this.indexSettingsValues = indexSettingsValues;
this.templatePreferIlmValue = templatePreferIlmValue;
this.maximumTimestamp = maximumTimestamp;
this.indexMode = indexMode;
}
public DataStream getDataStream() {
return dataStream;
}
public boolean isFailureStoreEffectivelyEnabled() {
return failureStoreEffectivelyEnabled;
}
public ClusterHealthStatus getDataStreamStatus() {
return dataStreamStatus;
}
@Nullable
public String getIndexTemplate() {
return indexTemplate;
}
@Nullable
public String getIlmPolicy() {
return ilmPolicyName;
}
@Nullable
public TimeSeries getTimeSeries() {
return timeSeries;
}
public Map<Index, IndexProperties> getIndexSettingsValues() {
return indexSettingsValues;
}
public boolean templatePreferIlmValue() {
return templatePreferIlmValue;
}
@Nullable
public Long getMaximumTimestamp() {
return maximumTimestamp;
}
@Nullable
public String getIndexModeName() {
return indexMode;
}
/**
* NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
* we no longer need to support calling this action remotely.
*/
@UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
@Override
public void writeTo(StreamOutput out) throws IOException {
dataStream.writeTo(out);
if (out.getTransportVersion().supports(TransportVersions.V_8_18_0)) {
out.writeBoolean(failureStoreEffectivelyEnabled);
}
dataStreamStatus.writeTo(out);
out.writeOptionalString(indexTemplate);
out.writeOptionalString(ilmPolicyName);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) {
out.writeOptionalWriteable(timeSeries);
}
if (out.getTransportVersion().onOrAfter(V_8_11_X)) {
out.writeMap(indexSettingsValues);
out.writeBoolean(templatePreferIlmValue);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
out.writeOptionalVLong(maximumTimestamp);
}
if (out.getTransportVersion().supports(INCLUDE_INDEX_MODE_IN_GET_DATA_STREAM)) {
out.writeOptionalString(indexMode);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return toXContent(builder, params, null, null, null);
}
/**
* Converts the response to XContent and passes the RolloverConditions and the global retention, when provided,
* to the data stream.
*/
public XContentBuilder toXContent(
XContentBuilder builder,
Params params,
@Nullable RolloverConfiguration rolloverConfiguration,
@Nullable DataStreamGlobalRetention dataGlobalRetention,
@Nullable DataStreamGlobalRetention failureGlobalRetention
) throws IOException {
builder.startObject();
builder.field(DataStream.NAME_FIELD.getPreferredName(), dataStream.getName());
builder.field(DataStream.TIMESTAMP_FIELD_FIELD.getPreferredName())
.startObject()
.field(DataStream.NAME_FIELD.getPreferredName(), DataStream.TIMESTAMP_FIELD_NAME)
.endObject();
indicesToXContent(builder, dataStream.getIndices(), false);
builder.field(DataStream.GENERATION_FIELD.getPreferredName(), dataStream.getGeneration());
if (dataStream.getMetadata() != null) {
builder.field(DataStream.METADATA_FIELD.getPreferredName(), dataStream.getMetadata());
}
builder.field(STATUS_FIELD.getPreferredName(), dataStreamStatus);
if (indexTemplate != null) {
builder.field(INDEX_TEMPLATE_FIELD.getPreferredName(), indexTemplate);
}
if (dataStream.getDataLifecycle() != null) {
builder.field(LIFECYCLE_FIELD.getPreferredName());
dataStream.getDataLifecycle()
.toXContent(builder, params, rolloverConfiguration, dataGlobalRetention, dataStream.isInternal());
}
if (ilmPolicyName != null) {
builder.field(ILM_POLICY_FIELD.getPreferredName(), ilmPolicyName);
}
builder.field(NEXT_GENERATION_INDEX_MANAGED_BY.getPreferredName(), getNextGenerationManagedBy().displayValue);
builder.field(PREFER_ILM.getPreferredName(), templatePreferIlmValue);
builder.field(HIDDEN_FIELD.getPreferredName(), dataStream.isHidden());
builder.field(SYSTEM_FIELD.getPreferredName(), dataStream.isSystem());
builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting());
builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated());
builder.field(ROLLOVER_ON_WRITE.getPreferredName(), dataStream.rolloverOnWrite());
if (this.maximumTimestamp != null) {
builder.field(MAXIMUM_TIMESTAMP.getPreferredName(), this.maximumTimestamp);
}
if (this.indexMode != null) {
builder.field(INDEX_MODE.getPreferredName(), indexMode);
}
addAutoShardingEvent(builder, params, dataStream.getAutoShardingEvent());
if (timeSeries != null) {
builder.startObject(TIME_SERIES.getPreferredName());
builder.startArray(TEMPORAL_RANGES.getPreferredName());
for (var range : timeSeries.temporalRanges()) {
builder.startObject();
Instant start = range.v1();
builder.field(TEMPORAL_RANGE_START.getPreferredName(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(start));
Instant end = range.v2();
builder.field(TEMPORAL_RANGE_END.getPreferredName(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(end));
builder.endObject();
}
builder.endArray();
builder.endObject();
}
builder.startObject(SETTINGS_FIELD.getPreferredName());
dataStream.getSettings().toXContent(builder, params);
builder.endObject();
builder.field(MAPPINGS_FIELD.getPreferredName());
Map<String, Object> uncompressedMappings = XContentHelper.convertToMap(
dataStream.getMappings().uncompressed(),
true,
builder.contentType()
).v2();
builder.map(uncompressedMappings);
builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName());
builder.field(FAILURE_STORE_ENABLED.getPreferredName(), failureStoreEffectivelyEnabled);
builder.field(DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(), dataStream.getFailureComponent().isRolloverOnWrite());
indicesToXContent(builder, dataStream.getFailureIndices(), true);
addAutoShardingEvent(builder, params, dataStream.getFailureComponent().getAutoShardingEvent());
DataStreamLifecycle failuresLifecycle = dataStream.getFailuresLifecycle(failureStoreEffectivelyEnabled);
if (failuresLifecycle != null) {
builder.field(LIFECYCLE_FIELD.getPreferredName());
failuresLifecycle.toXContent(builder, params, rolloverConfiguration, failureGlobalRetention, dataStream.isInternal());
}
builder.endObject();
builder.endObject();
return builder;
}
private XContentBuilder indicesToXContent(XContentBuilder builder, List<Index> indices, boolean failureIndices)
throws IOException {
builder.field(DataStream.INDICES_FIELD.getPreferredName());
builder.startArray();
for (Index index : indices) {
builder.startObject();
index.toXContentFragment(builder);
IndexProperties indexProperties = indexSettingsValues.get(index);
if (indexProperties != null) {
builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue);
// Failure indices have more limitation than backing indices,
// so we hide some index properties that are less relevant
if (failureIndices) {
// We only display ILM info, if this index has an ILM policy
if (indexProperties.ilmPolicyName() != null) {
builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm());
builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName());
}
} else {
builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm());
if (indexProperties.ilmPolicyName() != null) {
builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName());
}
builder.field(INDEX_MODE.getPreferredName(), indexProperties.indexMode);
}
}
builder.endObject();
}
builder.endArray();
return builder;
}
private void addAutoShardingEvent(XContentBuilder builder, Params params, DataStreamAutoShardingEvent autoShardingEvent)
throws IOException {
if (autoShardingEvent == null) {
return;
}
builder.startObject(AUTO_SHARDING_FIELD.getPreferredName());
autoShardingEvent.toXContent(builder, params);
builder.humanReadableField(
TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS.getPreferredName(),
TIME_SINCE_LAST_AUTO_SHARD_EVENT.getPreferredName(),
autoShardingEvent.getTimeSinceLastAutoShardingEvent(System::currentTimeMillis)
);
builder.endObject();
}
/**
* Computes and returns which system will manage the next generation for this data stream.
*/
public ManagedBy getNextGenerationManagedBy() {
// both ILM and DSL are configured so let's check the prefer_ilm setting to see which system takes precedence
if (ilmPolicyName != null && dataStream.getDataLifecycle() != null && dataStream.getDataLifecycle().enabled()) {
return templatePreferIlmValue ? ManagedBy.ILM : ManagedBy.LIFECYCLE;
}
if (ilmPolicyName != null) {
return ManagedBy.ILM;
}
if (dataStream.getDataLifecycle() != null && dataStream.getDataLifecycle().enabled()) {
return ManagedBy.LIFECYCLE;
}
return ManagedBy.UNMANAGED;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DataStreamInfo that = (DataStreamInfo) o;
return templatePreferIlmValue == that.templatePreferIlmValue
&& Objects.equals(dataStream, that.dataStream)
&& failureStoreEffectivelyEnabled == that.failureStoreEffectivelyEnabled
&& dataStreamStatus == that.dataStreamStatus
&& Objects.equals(indexTemplate, that.indexTemplate)
&& Objects.equals(ilmPolicyName, that.ilmPolicyName)
&& Objects.equals(timeSeries, that.timeSeries)
&& Objects.equals(indexSettingsValues, that.indexSettingsValues)
&& Objects.equals(maximumTimestamp, that.maximumTimestamp)
&& Objects.equals(indexMode, that.indexMode);
}
@Override
public int hashCode() {
return Objects.hash(
dataStream,
dataStreamStatus,
failureStoreEffectivelyEnabled,
indexTemplate,
ilmPolicyName,
timeSeries,
indexSettingsValues,
templatePreferIlmValue,
maximumTimestamp,
indexMode
);
}
}
public record TimeSeries(List<Tuple<Instant, Instant>> temporalRanges) implements Writeable {
/**
* NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
* we no longer need to support calling this action remotely.
*/
@UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(temporalRanges, (out1, value) -> {
out1.writeInstant(value.v1());
out1.writeInstant(value.v2());
});
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TimeSeries that = (TimeSeries) o;
return temporalRanges.equals(that.temporalRanges);
}
@Override
public int hashCode() {
return Objects.hash(temporalRanges);
}
}
/**
* Encapsulates the configured properties we want to display for each backing index.
* They'll usually be settings values, but could also be additional properties derived from settings.
*/
public record IndexProperties(boolean preferIlm, @Nullable String ilmPolicyName, ManagedBy managedBy, @Nullable String indexMode)
implements
Writeable {
public IndexProperties(StreamInput in) throws IOException {
this(
in.readBoolean(),
in.readOptionalString(),
in.readEnum(ManagedBy.class),
in.getTransportVersion().supports(INCLUDE_INDEX_MODE_IN_GET_DATA_STREAM) ? in.readOptionalString() : "unknown"
);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(preferIlm);
out.writeOptionalString(ilmPolicyName);
out.writeEnum(managedBy);
if (out.getTransportVersion().supports(INCLUDE_INDEX_MODE_IN_GET_DATA_STREAM)) {
out.writeOptionalString(indexMode);
}
}
}
private final List<DataStreamInfo> dataStreams;
@Nullable
private final RolloverConfiguration rolloverConfiguration;
@Nullable
private final DataStreamGlobalRetention dataGlobalRetention;
@Nullable
private final DataStreamGlobalRetention failuresGlobalRetention;
public Response(List<DataStreamInfo> dataStreams) {
this(dataStreams, null, null, null);
}
public Response(
List<DataStreamInfo> dataStreams,
@Nullable RolloverConfiguration rolloverConfiguration,
@Nullable DataStreamGlobalRetention dataGlobalRetention,
@Nullable DataStreamGlobalRetention failuresGlobalRetention
) {
this.dataStreams = dataStreams;
this.rolloverConfiguration = rolloverConfiguration;
this.dataGlobalRetention = dataGlobalRetention;
this.failuresGlobalRetention = failuresGlobalRetention;
}
public List<DataStreamInfo> getDataStreams() {
return dataStreams;
}
@Nullable
public RolloverConfiguration getRolloverConfiguration() {
return rolloverConfiguration;
}
@Nullable
public DataStreamGlobalRetention getDataGlobalRetention() {
return dataGlobalRetention;
}
@Nullable
public DataStreamGlobalRetention getFailuresGlobalRetention() {
return failuresGlobalRetention;
}
/**
* NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
* we no longer need to support calling this action remotely.
*/
@UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeCollection(dataStreams);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
out.writeOptionalWriteable(rolloverConfiguration);
}
// A version 9.x cluster will never read this, so we only need to include the patch version here.
if (out.getTransportVersion().supports(INTRODUCE_FAILURES_DEFAULT_RETENTION_PATCH)) {
out.writeOptionalTimeValue(dataGlobalRetention == null ? null : dataGlobalRetention.defaultRetention());
out.writeOptionalTimeValue(dataGlobalRetention == null ? null : dataGlobalRetention.maxRetention());
out.writeOptionalTimeValue(failuresGlobalRetention == null ? null : failuresGlobalRetention.defaultRetention());
} else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
out.writeOptionalWriteable(dataGlobalRetention);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startArray(DATA_STREAMS_FIELD.getPreferredName());
for (DataStreamInfo dataStream : dataStreams) {
dataStream.toXContent(
builder,
DataStreamLifecycle.addEffectiveRetentionParams(params),
rolloverConfiguration,
dataGlobalRetention,
failuresGlobalRetention
);
}
builder.endArray();
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Response response = (Response) o;
return dataStreams.equals(response.dataStreams)
&& Objects.equals(rolloverConfiguration, response.rolloverConfiguration)
&& Objects.equals(dataGlobalRetention, response.dataGlobalRetention)
&& Objects.equals(failuresGlobalRetention, response.failuresGlobalRetention);
}
@Override
public int hashCode() {
return Objects.hash(dataStreams, rolloverConfiguration, dataGlobalRetention, failuresGlobalRetention);
}
}
}
| DataStreamInfo |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableDebounce.java | {
"start": 5100,
"end": 6461
} | class ____<T, U> extends DisposableSubscriber<U> {
final DebounceSubscriber<T, U> parent;
final long index;
final T value;
boolean done;
final AtomicBoolean once = new AtomicBoolean();
DebounceInnerSubscriber(DebounceSubscriber<T, U> parent, long index, T value) {
this.parent = parent;
this.index = index;
this.value = value;
}
@Override
public void onNext(U t) {
if (done) {
return;
}
done = true;
cancel();
emit();
}
void emit() {
if (once.compareAndSet(false, true)) {
parent.emit(index, value);
}
}
@Override
public void onError(Throwable t) {
if (done) {
RxJavaPlugins.onError(t);
return;
}
done = true;
parent.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
emit();
}
}
}
}
| DebounceInnerSubscriber |
java | google__dagger | hilt-android/main/java/dagger/hilt/android/HiltAndroidApp.java | {
"start": 901,
"end": 1476
} | class ____ the Dagger components
* should be generated. Since all components will be built in the same compilation as the annotated
* application, all modules and entry points that should be installed in the component need to be
* transitive compilation dependencies of the annotated application.
*
* <p>Usage of this annotation is similar to {@link dagger.hilt.android.AndroidEntryPoint} with the
* only difference being that it only works on application classes and additionally triggers Dagger
* component generation.
*
* <p>This annotation will generate a base | where |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_2600/Issue2678.java | {
"start": 1298,
"end": 1783
} | class ____ {
private String name;
private int age;
@JSONField(serialzeFeatures = SerializerFeature.UseSingleQuotes)
public String getName()
{
return name;
}
public void setName( String name )
{
this.name = name;
}
public int getAge()
{
return age;
}
public void setAge( int age )
{
this.age = age;
}
}
}
| Person2 |
java | dropwizard__dropwizard | dropwizard-client/src/main/java/io/dropwizard/client/JerseyClientConfiguration.java | {
"start": 299,
"end": 553
} | class ____ by {@link JerseyClientBuilder}. Extends
* {@link HttpClientConfiguration}.
*
* @see HttpClientConfiguration
* @see <a href="http://dropwizard.io/1.0.2/docs/manual/configuration.html#jerseyclient">Jersey Client Configuration</a>
*/
public | used |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/NestedTestClassesTests.java | {
"start": 16479,
"end": 16642
} | class ____ {
@Test
void successful() {
}
@Test
void failing() {
Assertions.fail("something went wrong");
}
@Nested
| NestedInAbstractClass |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/MessageHistory.java | {
"start": 945,
"end": 2878
} | interface ____ {
/**
* Gets the route id at the point of this history.
*/
String getRouteId();
/**
* Gets the node at the point of this history.
*/
NamedNode getNode();
/**
* Gets the point in time the message history was created
*/
long getTime();
/**
* Gets the elapsed time in millis processing the node took (this is 0 until the node processing is done)
*/
long getElapsed();
/**
* The elapsed time since created.
*/
default long getElapsedSinceCreated() {
return System.nanoTime() - getTime();
}
/**
* Used for signalling that processing of the node is done.
*/
void nodeProcessingDone();
/**
* Used for signalling that processing of the node is done.
*
* @param delta extra time in millis that should be subtracted from the processing time
*/
void nodeProcessingDone(long delta);
/**
* A read-only copy of the message at the point of this history (if this has been enabled).
*/
Message getMessage();
/**
* Used specially during debugging where some EIP nodes are not accepted for debugging and are essentially skipped.
* This allows tooling to avoid dumping message history for nodes that did not take part in the debugger.
*/
void setAcceptDebugger(boolean acceptDebugger);
/**
* Used specially during debugging where some EIP nodes are not accepted for debugging and are essentially skipped.
* This allows tooling to avoid dumping message history for nodes that did not take part in the debugger.
*/
boolean isAcceptDebugger();
/**
* Used specially during debugging to know that an EIP was skipped over
*/
void setDebugSkipOver(boolean skipOver);
/**
* Used specially during debugging to know that an EIP was skipped over
*/
boolean isDebugSkipOver();
}
| MessageHistory |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AStorageStatistics.java | {
"start": 1330,
"end": 1659
} | class ____
extends StorageStatisticsFromIOStatistics {
public static final String NAME = "S3AStorageStatistics";
public S3AStorageStatistics(final IOStatistics ioStatistics) {
super(NAME, "s3a", ioStatistics);
}
public S3AStorageStatistics() {
super(NAME, "s3a", emptyStatistics());
}
}
| S3AStorageStatistics |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/basicapi/MocksSerializationForAnnotationTest.java | {
"start": 5886,
"end": 8691
} | class ____ implements Serializable {
Bar bar;
Foo() {
bar = new Bar();
bar.foo = this;
}
}
@Test
public void should_serialization_work() throws Exception {
// given
Foo foo = new Foo();
// when
foo = serializeAndBack(foo);
// then
assertSame(foo, foo.bar.foo);
}
@Test
public void should_stub_even_if_some_methods_called_after_serialization() throws Exception {
// given
// when
when(imethodsMock.simpleMethod(1)).thenReturn("foo");
ByteArrayOutputStream serialized = serializeMock(imethodsMock);
IMethods readObject = deserializeMock(serialized, IMethods.class);
when(readObject.simpleMethod(2)).thenReturn("bar");
// then
assertEquals("foo", readObject.simpleMethod(1));
assertEquals("bar", readObject.simpleMethod(2));
}
@Test
public void should_verify_call_order_for_serialized_mock() throws Exception {
imethodsMock.arrayReturningMethod();
imethodsMock2.arrayReturningMethod();
// when
ByteArrayOutputStream serialized = serializeMock(imethodsMock);
ByteArrayOutputStream serialized2 = serializeMock(imethodsMock2);
// then
IMethods readObject = deserializeMock(serialized, IMethods.class);
IMethods readObject2 = deserializeMock(serialized2, IMethods.class);
InOrder inOrder = inOrder(readObject, readObject2);
inOrder.verify(readObject).arrayReturningMethod();
inOrder.verify(readObject2).arrayReturningMethod();
}
@Test
public void should_remember_interactions_for_serialized_mock() throws Exception {
List<?> value = Collections.emptyList();
when(imethodsMock.objectArgMethod(anyString())).thenReturn(value);
imethodsMock.objectArgMethod("happened");
// when
ByteArrayOutputStream serialized = serializeMock(imethodsMock);
// then
IMethods readObject = deserializeMock(serialized, IMethods.class);
verify(readObject, never()).objectArgMethod("never happened");
}
@Test
public void should_serialize_with_stubbing_callback() throws Exception {
// given
CustomAnswersMustImplementSerializableForSerializationToWork answer =
new CustomAnswersMustImplementSerializableForSerializationToWork();
answer.string = "return value";
when(imethodsMock.objectArgMethod(anyString())).thenAnswer(answer);
// when
ByteArrayOutputStream serialized = serializeMock(imethodsMock);
// then
IMethods readObject = deserializeMock(serialized, IMethods.class);
assertEquals(answer.string, readObject.objectArgMethod(""));
}
static | Foo |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/operators/windowing/EvictingWindowOperatorContractTest.java | {
"start": 2223,
"end": 4738
} | class ____ extends WindowOperatorContractTest {
protected <W extends Window, OUT>
KeyedOneInputStreamOperatorTestHarness<Integer, Integer, OUT> createWindowOperator(
WindowAssigner<Integer, W> assigner,
Trigger<Integer, W> trigger,
long allowedLatenss,
InternalWindowFunction<Iterable<Integer>, OUT, Integer, W> windowFunction,
OutputTag<Integer> lateOutputTag)
throws Exception {
KeySelector<Integer, Integer> keySelector =
new KeySelector<Integer, Integer>() {
private static final long serialVersionUID = 1L;
@Override
public Integer getKey(Integer value) throws Exception {
return value;
}
};
ListStateDescriptor<StreamRecord<Integer>> intListDescriptor =
new ListStateDescriptor<>(
"int-list",
(TypeSerializer<StreamRecord<Integer>>)
new StreamElementSerializer(IntSerializer.INSTANCE));
@SuppressWarnings("unchecked")
EvictingWindowOperatorFactory<Integer, Integer, OUT, W> operator =
new EvictingWindowOperatorFactory<>(
assigner,
assigner.getWindowSerializer(new ExecutionConfig()),
keySelector,
IntSerializer.INSTANCE,
intListDescriptor,
windowFunction,
trigger,
CountEvictor.<W>of(100),
allowedLatenss,
lateOutputTag);
return new KeyedOneInputStreamOperatorTestHarness<>(
operator, keySelector, BasicTypeInfo.INT_TYPE_INFO);
}
protected <W extends Window, OUT>
KeyedOneInputStreamOperatorTestHarness<Integer, Integer, OUT> createWindowOperator(
WindowAssigner<Integer, W> assigner,
Trigger<Integer, W> trigger,
long allowedLatenss,
InternalWindowFunction<Iterable<Integer>, OUT, Integer, W> windowFunction)
throws Exception {
return createWindowOperator(
assigner, trigger, allowedLatenss, windowFunction, null /* late output tag */);
}
}
| EvictingWindowOperatorContractTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/lock/PessimisticWriteWithOptionalOuterJoinBreaksRefreshTest.java | {
"start": 2165,
"end": 2291
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
Long id;
}
@Entity(name = "Child")
public static | Parent |
java | spring-projects__spring-boot | module/spring-boot-jetty/src/test/java/org/springframework/boot/jetty/autoconfigure/metrics/JettyMetricsAutoConfigurationTests.java | {
"start": 12118,
"end": 12319
} | class ____ {
@Bean
JettyServletWebServerFactory jettyFactory() {
return new JettyServletWebServerFactory(0);
}
}
@Configuration(proxyBeanMethods = false)
static | ServletWebServerConfiguration |
java | google__guice | extensions/throwingproviders/test/com/google/inject/throwingproviders/CheckedProviderTest.java | {
"start": 52167,
"end": 53074
} | class ____ implements Foo {
public InvalidFoo(String dep) {}
@Override
public String s() {
return null;
}
}
public void testNoThrowingInject() {
try {
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
ThrowingProviderBinder.create(binder())
.bind(RemoteProvider.class, Foo.class)
.providing(NormalInjectableFoo.class);
}
});
fail();
} catch (CreationException ce) {
assertEquals(
"Could not find a suitable constructor in "
+ NormalInjectableFoo.class.getName()
+ ". Classes must have either one (and only one) constructor annotated with "
+ "@ThrowingInject.",
Iterables.getOnlyElement(ce.getErrorMessages()).getMessage());
}
}
static | InvalidFoo |
java | google__dagger | javatests/dagger/internal/codegen/SubcomponentCreatorValidationTest.java | {
"start": 32168,
"end": 32267
} | interface ____ extends Supertype {",
" @Subcomponent.Builder",
" | HasSupertype |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/common/runtime/src/main/java/org/jboss/resteasy/reactive/common/util/DateUtil.java | {
"start": 6412,
"end": 6488
} | class ____ not be instantiated.
*/
private DateUtil() {
}
}
| should |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | {
"start": 23126,
"end": 46665
} | class ____ implements FsVolumeSpi.BlockIterator {
private final File bpidDir;
private final String name;
private final String bpid;
private long maxStalenessMs = 0;
private List<String> cache;
private long cacheMs;
private BlockIteratorState state;
BlockIteratorImpl(String bpid, String name) {
this.bpidDir = new File(currentDir, bpid);
this.name = name;
this.bpid = bpid;
rewind();
}
/**
* Get the next subdirectory within the block pool slice.
*
* @return The next subdirectory within the block pool slice, or
* null if there are no more.
*/
private String getNextSubDir(String prev, File dir)
throws IOException {
List<String> children = fileIoProvider.listDirectory(
FsVolumeImpl.this, dir, SubdirFilter.INSTANCE);
cache = null;
cacheMs = 0;
if (children.isEmpty()) {
LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
storageID, bpid, dir.getAbsolutePath());
return null;
}
Collections.sort(children);
String nextSubDir = nextSorted(children, prev);
LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} within {}",
storageID, bpid, nextSubDir, dir.getAbsolutePath());
return nextSubDir;
}
private String getNextFinalizedDir() throws IOException {
File dir = Paths.get(
bpidDir.getAbsolutePath(), "current", "finalized").toFile();
return getNextSubDir(state.curFinalizedDir, dir);
}
private String getNextFinalizedSubDir() throws IOException {
if (state.curFinalizedDir == null) {
return null;
}
File dir = Paths.get(
bpidDir.getAbsolutePath(), "current", "finalized",
state.curFinalizedDir).toFile();
return getNextSubDir(state.curFinalizedSubDir, dir);
}
private List<String> getSubdirEntries() throws IOException {
if (state.curFinalizedSubDir == null) {
return null; // There are no entries in the null subdir.
}
long now = Time.monotonicNow();
if (cache != null) {
long delta = now - cacheMs;
if (delta < maxStalenessMs) {
return cache;
} else {
LOG.trace("getSubdirEntries({}, {}): purging entries cache for {} " +
"after {} ms.", storageID, bpid, state.curFinalizedSubDir, delta);
cache = null;
}
}
File dir = Paths.get(bpidDir.getAbsolutePath(), "current", "finalized",
state.curFinalizedDir, state.curFinalizedSubDir).toFile();
List<String> entries = fileIoProvider.listDirectory(
FsVolumeImpl.this, dir, BlockFileFilter.INSTANCE);
if (entries.isEmpty()) {
entries = null;
LOG.trace("getSubdirEntries({}, {}): no entries found in {}", storageID,
bpid, dir.getAbsolutePath());
} else {
Collections.sort(entries);
LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
storageID, bpid, entries.size(), dir.getAbsolutePath());
}
cache = entries;
cacheMs = now;
return cache;
}
/**
* Get the next block.<p/>
*
* Each volume has a hierarchical structure.<p/>
*
* <code>
* BPID B0
* finalized/
* subdir0
* subdir0
* blk_000
* blk_001
* ...
* subdir1
* subdir0
* ...
* rbw/
* </code>
*
* When we run out of entries at one level of the structure, we search
* progressively higher levels. For example, when we run out of blk_
* entries in a subdirectory, we search for the next subdirectory.
* And so on.
*/
@Override
public ExtendedBlock nextBlock() throws IOException {
if (state.atEnd) {
return null;
}
try {
while (true) {
List<String> entries = getSubdirEntries();
if (entries != null) {
state.curEntry = nextSorted(entries, state.curEntry);
if (state.curEntry == null) {
LOG.trace("nextBlock({}, {}): advancing from {} to next " +
"subdirectory.", storageID, bpid, state.curFinalizedSubDir);
} else {
ExtendedBlock block =
new ExtendedBlock(bpid, Block.filename2id(state.curEntry));
File expectedBlockDir = DatanodeUtil.idToBlockDir(
new File("."), block.getBlockId());
File actualBlockDir = Paths.get(".",
state.curFinalizedDir, state.curFinalizedSubDir).toFile();
if (!expectedBlockDir.equals(actualBlockDir)) {
LOG.error("nextBlock({}, {}): block id {} found in invalid " +
"directory. Expected directory: {}. " +
"Actual directory: {}", storageID, bpid,
block.getBlockId(), expectedBlockDir.getPath(),
actualBlockDir.getPath());
continue;
}
File blkFile = getBlockFile(bpid, block);
File metaFile ;
try {
metaFile = FsDatasetUtil.findMetaFile(blkFile);
} catch (FileNotFoundException e){
LOG.warn("nextBlock({}, {}): {}", storageID, bpid,
e.getMessage());
continue;
}
block.setGenerationStamp(
Block.getGenerationStamp(metaFile.getName()));
block.setNumBytes(blkFile.length());
LOG.trace("nextBlock({}, {}): advancing to {}",
storageID, bpid, block);
return block;
}
}
state.curFinalizedSubDir = getNextFinalizedSubDir();
if (state.curFinalizedSubDir == null) {
state.curFinalizedDir = getNextFinalizedDir();
if (state.curFinalizedDir == null) {
state.atEnd = true;
return null;
}
}
}
} catch (IOException e) {
state.atEnd = true;
LOG.error("nextBlock({}, {}): I/O error", storageID, bpid, e);
throw e;
}
}
private File getBlockFile(String bpid, ExtendedBlock blk)
throws IOException {
return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(bpid),
blk.getBlockId()).toString() + "/" + blk.getBlockName());
}
@Override
public boolean atEnd() {
return state.atEnd;
}
@Override
public void rewind() {
cache = null;
cacheMs = 0;
state = new BlockIteratorState();
}
@Override
public void save() throws IOException {
state.lastSavedMs = Time.now();
boolean success = false;
try (BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(fileIoProvider.getFileOutputStream(
FsVolumeImpl.this, getTempSaveFile()), StandardCharsets.UTF_8))) {
WRITER.writeValue(writer, state);
success = true;
} finally {
if (!success) {
fileIoProvider.delete(FsVolumeImpl.this, getTempSaveFile());
}
}
fileIoProvider.move(FsVolumeImpl.this,
getTempSaveFile().toPath(), getSaveFile().toPath(),
StandardCopyOption.ATOMIC_MOVE);
if (LOG.isTraceEnabled()) {
LOG.trace("save({}, {}): saved {}", storageID, bpid,
WRITER.writeValueAsString(state));
}
}
public void load() throws IOException {
File file = getSaveFile();
this.state = READER.readValue(file);
if (LOG.isTraceEnabled()) {
LOG.trace("load({}, {}): loaded iterator {} from {}: {}", storageID,
bpid, name, file.getAbsoluteFile(),
WRITER.writeValueAsString(state));
}
}
File getSaveFile() {
return new File(bpidDir, name + ".cursor");
}
File getTempSaveFile() {
return new File(bpidDir, name + ".cursor.tmp");
}
@Override
public void setMaxStalenessMs(long maxStalenessMs) {
this.maxStalenessMs = maxStalenessMs;
}
@Override
public void close() throws IOException {
// No action needed for this volume implementation.
}
@Override
public long getIterStartMs() {
return state.iterStartMs;
}
@Override
public long getLastSavedMs() {
return state.lastSavedMs;
}
@Override
public String getBlockPoolId() {
return bpid;
}
}
@Override
public BlockIterator newBlockIterator(String bpid, String name) {
return new BlockIteratorImpl(bpid, name);
}
@Override
public BlockIterator loadBlockIterator(String bpid, String name)
throws IOException {
BlockIteratorImpl iter = new BlockIteratorImpl(bpid, name);
iter.load();
return iter;
}
@Override
public FsDatasetSpi<? extends FsVolumeSpi> getDataset() {
return dataset;
}
/**
* RBW files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createRbwFile(String bpid, Block b) throws IOException {
checkReference();
reserveSpaceForReplica(b.getNumBytes());
try {
return getBlockPoolSlice(bpid).createRbwFile(b);
} catch (IOException exception) {
releaseReservedSpace(b.getNumBytes());
throw exception;
}
}
/**
*
* @param bytesReserved Space that was reserved during
* block creation. Now that the block is being finalized we
* can free up this space.
* @return
* @throws IOException
*/
ReplicaInfo addFinalizedBlock(String bpid, Block b, ReplicaInfo replicaInfo,
long bytesReserved) throws IOException {
releaseReservedSpace(bytesReserved);
File dest = getBlockPoolSlice(bpid).addFinalizedBlock(b, replicaInfo);
final byte[] checksum;
// copy the last partial checksum if the replica is originally
// in finalized or rbw state.
switch (replicaInfo.getState()) {
case FINALIZED:
FinalizedReplica finalized = (FinalizedReplica) replicaInfo;
checksum = finalized.getLastPartialChunkChecksum();
break;
case RBW:
ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
checksum = rbw.getLastChecksumAndDataLen().getChecksum();
break;
default:
checksum = null;
break;
}
return new ReplicaBuilder(ReplicaState.FINALIZED)
.setBlock(replicaInfo)
.setFsVolume(this)
.setDirectoryToUse(dest.getParentFile())
.setLastPartialChunkChecksum(checksum)
.build();
}
Executor getCacheExecutor() {
return cacheExecutor;
}
@Override
public VolumeCheckResult check(VolumeCheckContext ignored)
throws DiskErrorException {
// TODO:FEDERATION valid synchronization
for (BlockPoolSlice s : bpSlices.values()) {
s.checkDirs();
}
return VolumeCheckResult.HEALTHY;
}
void getVolumeMap(ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
for (BlockPoolSlice s : bpSlices.values()) {
s.getVolumeMap(volumeMap, ramDiskReplicaMap);
}
}
void getVolumeMap(String bpid, ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
long getNumBlocks() {
long numBlocks = 0L;
for (BlockPoolSlice s : bpSlices.values()) {
numBlocks += s.getNumOfBlocks();
}
return numBlocks;
}
@Override
public String toString() {
return currentDir != null ? currentDir.getParent() : "NULL";
}
void shutdown() {
if (cacheExecutor != null) {
cacheExecutor.shutdown();
}
Set<Entry<String, BlockPoolSlice>> set = bpSlices.entrySet();
for (Entry<String, BlockPoolSlice> entry : set) {
entry.getValue().shutdown(null);
}
if (metrics != null) {
metrics.unRegister();
}
}
void addBlockPool(String bpid, Configuration c) throws IOException {
addBlockPool(bpid, c, null);
}
void addBlockPool(String bpid, Configuration c, Timer timer)
throws IOException {
File bpdir = new File(currentDir, bpid);
BlockPoolSlice bp;
if (timer == null) {
timer = new Timer();
}
bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
bpSlices.put(bpid, bp);
}
void shutdownBlockPool(String bpid, BlockListAsLongs blocksListsAsLongs) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.shutdown(blocksListsAsLongs);
}
bpSlices.remove(bpid);
}
boolean isBPDirEmpty(String bpid) throws IOException {
File volumeCurrentDir = this.getCurrentDir();
File bpDir = new File(volumeCurrentDir, bpid);
File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
File finalizedDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_FINALIZED);
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
if (fileIoProvider.exists(this, finalizedDir) &&
!DatanodeUtil.dirNoFilesRecursive(this, finalizedDir, fileIoProvider)) {
return false;
}
if (fileIoProvider.exists(this, rbwDir) &&
fileIoProvider.list(this, rbwDir).length != 0) {
return false;
}
return true;
}
void deleteBPDirectories(String bpid, boolean force) throws IOException {
File volumeCurrentDir = this.getCurrentDir();
File bpDir = new File(volumeCurrentDir, bpid);
if (!bpDir.isDirectory()) {
// nothing to be deleted
return;
}
File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
File finalizedDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_FINALIZED);
File lazypersistDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_LAZY_PERSIST);
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
if (force) {
fileIoProvider.fullyDelete(this, bpDir);
} else {
if (!fileIoProvider.delete(this, rbwDir)) {
throw new IOException("Failed to delete " + rbwDir);
}
if (!DatanodeUtil.dirNoFilesRecursive(
this, finalizedDir, fileIoProvider) ||
!fileIoProvider.fullyDelete(
this, finalizedDir)) {
throw new IOException("Failed to delete " + finalizedDir);
}
if (lazypersistDir.exists() &&
((!DatanodeUtil.dirNoFilesRecursive(
this, lazypersistDir, fileIoProvider) ||
!fileIoProvider.fullyDelete(this, lazypersistDir)))) {
throw new IOException("Failed to delete " + lazypersistDir);
}
fileIoProvider.fullyDelete(this, tmpDir);
for (File f : fileIoProvider.listFiles(this, bpCurrentDir)) {
if (!fileIoProvider.delete(this, f)) {
throw new IOException("Failed to delete " + f);
}
}
if (!fileIoProvider.delete(this, bpCurrentDir)) {
throw new IOException("Failed to delete " + bpCurrentDir);
}
for (File f : fileIoProvider.listFiles(this, bpDir)) {
if (!fileIoProvider.delete(this, f)) {
throw new IOException("Failed to delete " + f);
}
}
if (!fileIoProvider.delete(this, bpDir)) {
throw new IOException("Failed to delete " + bpDir);
}
}
}
@Override
public String getStorageID() {
return storageID;
}
@Override
public StorageType getStorageType() {
return storageType;
}
DatanodeStorage toDatanodeStorage() {
return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
}
@Override
public byte[] loadLastPartialChunkChecksum(
File blockFile, File metaFile) throws IOException {
// readHeader closes the temporary FileInputStream.
DataChecksum dcs;
try (FileInputStream fis = fileIoProvider.getFileInputStream(
this, metaFile)) {
dcs = BlockMetadataHeader.readHeader(fis).getChecksum();
}
final int checksumSize = dcs.getChecksumSize();
final long onDiskLen = blockFile.length();
final int bytesPerChecksum = dcs.getBytesPerChecksum();
if (onDiskLen % bytesPerChecksum == 0) {
// the last chunk is a complete one. No need to preserve its checksum
// because it will not be modified.
return null;
}
long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
(onDiskLen / bytesPerChecksum) * checksumSize;
byte[] lastChecksum = new byte[checksumSize];
try (RandomAccessFile raf = fileIoProvider.getRandomAccessFile(
this, metaFile, "r")) {
raf.seek(offsetInChecksum);
int readBytes = raf.read(lastChecksum, 0, checksumSize);
if (readBytes == -1) {
throw new IOException("Expected to read " + checksumSize +
" bytes from offset " + offsetInChecksum +
" but reached end of file.");
} else if (readBytes != checksumSize) {
throw new IOException("Expected to read " + checksumSize +
" bytes from offset " + offsetInChecksum + " but read " +
readBytes + " bytes.");
}
}
return lastChecksum;
}
public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo,
long newGS, long estimateBlockLen) throws IOException {
long bytesReserved = estimateBlockLen - replicaInfo.getNumBytes();
if (getAvailable() < bytesReserved) {
throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo);
}
assert replicaInfo.getVolume() == this:
"The volume of the replica should be the same as this volume";
// construct a RBW replica with the new GS
File newBlkFile = new File(getRbwDir(bpid), replicaInfo.getBlockName());
LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
.setBlockId(replicaInfo.getBlockId())
.setLength(replicaInfo.getNumBytes())
.setGenerationStamp(newGS)
.setFsVolume(this)
.setDirectoryToUse(newBlkFile.getParentFile())
.setWriterThread(Thread.currentThread())
.setBytesToReserve(bytesReserved)
.buildLocalReplicaInPipeline();
// Only a finalized replica can be appended.
FinalizedReplica finalized = (FinalizedReplica)replicaInfo;
// load last checksum and datalen
newReplicaInfo.setLastChecksumAndDataLen(
finalized.getVisibleLength(), finalized.getLastPartialChunkChecksum());
// rename meta file to rbw directory
// rename block file to rbw directory
long oldReplicaLength = replicaInfo.getNumBytes() + replicaInfo.getMetadataLength();
newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile);
getBlockPoolSlice(bpid).decDfsUsed(oldReplicaLength);
reserveSpaceForReplica(bytesReserved);
return newReplicaInfo;
}
public ReplicaInPipeline createRbw(ExtendedBlock b) throws IOException {
File f = createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
.setBlockId(b.getBlockId())
.setGenerationStamp(b.getGenerationStamp())
.setFsVolume(this)
.setDirectoryToUse(f.getParentFile())
.setBytesToReserve(b.getNumBytes())
.buildLocalReplicaInPipeline();
return newReplicaInfo;
}
/**
 * Converts a temporary replica to replica-being-written (RBW) state:
 * moves its files into the block pool's RBW directory, rebuilds the
 * in-memory replica (acked bytes, last partial chunk checksum), and
 * returns the new RBW replica.
 *
 * @param b the block, carrying the expected generation stamp and the
 *          number of bytes visible to readers
 * @param temp the temporary replica to convert
 * @return the new RBW replica
 * @throws IOException if the file move or checksum load fails
 */
public ReplicaInPipeline convertTemporaryToRbw(ExtendedBlock b,
ReplicaInfo temp) throws IOException {
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
final long numBytes = temp.getNumBytes();
// move block files to the rbw directory
BlockPoolSlice bpslice = getBlockPoolSlice(b.getBlockPoolId());
final File dest = FsDatasetImpl.moveBlockFiles(b.getLocalBlock(), temp,
bpslice.getRbwDir());
// create RBW
final LocalReplicaInPipeline rbw = new ReplicaBuilder(ReplicaState.RBW)
.setBlockId(blockId)
.setLength(numBytes)
.setGenerationStamp(expectedGs)
.setFsVolume(this)
.setDirectoryToUse(dest.getParentFile())
.setWriterThread(Thread.currentThread())
.setBytesToReserve(0)
.buildLocalReplicaInPipeline();
// 'visible' bytes have already been acknowledged to readers/clients.
rbw.setBytesAcked(visible);
// load last checksum and datalen
final File destMeta = FsDatasetUtil.getMetaFile(dest,
b.getGenerationStamp());
byte[] lastChunkChecksum = loadLastPartialChunkChecksum(dest, destMeta);
rbw.setLastChecksumAndDataLen(numBytes, lastChunkChecksum);
return rbw;
}
/**
 * Creates a temporary replica for the given block, backed by a freshly
 * created file in this volume's tmp directory.
 *
 * @param b the block to create a temporary replica for
 * @return the new temporary replica
 * @throws IOException if the tmp file cannot be created
 */
public ReplicaInPipeline createTemporary(ExtendedBlock b) throws IOException {
final File tmpFile = createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
return new ReplicaBuilder(ReplicaState.TEMPORARY)
    .setBlockId(b.getBlockId())
    .setGenerationStamp(b.getGenerationStamp())
    .setDirectoryToUse(tmpFile.getParentFile())
    .setBytesToReserve(b.getLocalBlock().getNumBytes())
    .setFsVolume(this)
    .buildLocalReplicaInPipeline();
}
/**
 * Copy-on-truncate for a replica-under-recovery: copies the replica's
 * meta/block files under a new block id and the recovery generation stamp,
 * truncates the copy to the requested length, and returns an RBW replica
 * describing the truncated copy.
 *
 * @param rur the replica under recovery
 * @param bpid block pool id
 * @param newBlockId block id to assign to the copied replica
 * @param recoveryId recovery id, used as the new generation stamp
 * @param newlength length to truncate the copy to
 * @return an RBW replica for the truncated copy
 * @throws IOException if copying or truncating the files fails
 */
public ReplicaInPipeline updateRURCopyOnTruncate(ReplicaInfo rur,
String bpid, long newBlockId, long recoveryId, long newlength)
throws IOException {
// Ensure we do not truncate data shared with another replica via hardlink.
rur.breakHardLinksIfNeeded();
File[] copiedReplicaFiles =
copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
// Index 1 is the block file, index 0 the meta file (see the copy helper).
File blockFile = copiedReplicaFiles[1];
File metaFile = copiedReplicaFiles[0];
LocalReplica.truncateBlock(rur.getVolume(), blockFile, metaFile,
rur.getNumBytes(), newlength, fileIoProvider);
LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
.setBlockId(newBlockId)
.setGenerationStamp(recoveryId)
.setFsVolume(this)
.setDirectoryToUse(blockFile.getParentFile())
.setBytesToReserve(newlength)
.buildLocalReplicaInPipeline();
// In theory, this rbw replica needs to reload last chunk checksum,
// but it is immediately converted to finalized state within the same lock,
// so no need to update it.
return newReplicaInfo;
}
/**
 * Copies a replica's files into the tmp directory of the given block pool,
 * renaming them for a new block id and generation stamp.
 *
 * @return the copied files; index 0 is the meta file, index 1 the block file
 * @throws IOException if the copy fails
 */
private File[] copyReplicaWithNewBlockIdAndGS(
ReplicaInfo replicaInfo, String bpid, long newBlkId, long newGS)
throws IOException {
final FsVolumeImpl volume = (FsVolumeImpl) replicaInfo.getVolume();
final File tmpDir = volume.getBlockPoolSlice(bpid).getTmpDir();
final File blockDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
final File newBlockFile =
    new File(blockDir, Block.BLOCK_FILE_PREFIX + newBlkId);
final File newMetaFile = FsDatasetUtil.getMetaFile(newBlockFile, newGS);
return FsDatasetImpl.copyBlockFiles(replicaInfo, newMetaFile,
    newBlockFile, true, DFSUtilClient.getSmallBufferSize(conf), conf);
}
/**
 * Scans this volume's finalized directory for the given block pool and
 * appends the findings to {@code report}.
 *
 * @param bpid block pool id to scan
 * @param report collection the scan results are added to
 * @param reportCompiler compiler driving the scan
 * @throws InterruptedException if the scan is interrupted
 * @throws IOException if the directory cannot be read
 */
@Override
public void compileReport(String bpid, Collection<ScanInfo> report,
ReportCompiler reportCompiler) throws InterruptedException, IOException {
// Resolve the finalized directory once instead of twice; the scan root
// and the base directory are the same for this volume.
File finalizedDir = getFinalizedDir(bpid);
compileReport(finalizedDir, finalizedDir, report, reportCompiler);
}
/** Returns the {@link FileIoProvider} used for file I/O on this volume. */
@Override
public FileIoProvider getFileIoProvider() {
return fileIoProvider;
}
/** Returns the per-volume metrics for this DataNode volume. */
@Override
public DataNodeVolumeMetrics getMetrics() {
return metrics;
}
/**
* Filter for block file names stored on the file system volumes.
*/
public | BlockIteratorImpl |
java | apache__flink | flink-formats/flink-compress/src/main/java/org/apache/flink/formats/compress/extractor/DefaultExtractor.java | {
"start": 1021,
"end": 1208
} | class ____<T> implements Extractor<T> {
@Override
public byte[] extract(T element) {
return (element.toString() + System.lineSeparator()).getBytes();
}
}
| DefaultExtractor |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplate.java | {
"start": 1037,
"end": 4605
} | class ____ implements ToXContent {
// Backing script when built from a Script or the typed constructor;
// null when built from a plain string.
private final Script script;
// Raw template text when built from a plain string; null otherwise.
private final String inlineTemplate;
// Whether rendering may require mustache compilation: true for stored
// scripts or when the source contains "{{".
private final boolean mayRequireCompilation;
/**
 * Creates an inline text template from a raw string. Compilation is only
 * flagged as necessary when the string contains mustache syntax ("{{").
 */
public TextTemplate(String template) {
this.script = null;
this.inlineTemplate = template;
this.mayRequireCompilation = template.contains("{{");
}
public TextTemplate(String template, @Nullable XContentType contentType, ScriptType type, @Nullable Map<String, Object> params) {
Map<String, String> options = null;
if (type == ScriptType.INLINE) {
options = new HashMap<>();
if (contentType != null) {
options.put(Script.CONTENT_TYPE_OPTION, contentType.canonical().mediaType());
}
}
if (params == null) {
params = new HashMap<>();
}
this.script = new Script(type, type == ScriptType.STORED ? null : Script.DEFAULT_TEMPLATE_LANG, template, options, params);
this.inlineTemplate = null;
this.mayRequireCompilation = script.getType() == ScriptType.STORED || script.getIdOrCode().contains("{{");
}
/** Wraps an existing {@link Script} as a text template. */
public TextTemplate(Script script) {
this.script = script;
this.inlineTemplate = null;
this.mayRequireCompilation = script.getType() == ScriptType.STORED || script.getIdOrCode().contains("{{");
}
/** Returns the backing script, or {@code null} for plain-string templates. */
public Script getScript() {
return script;
}
/**
 * Returns the template source: the script's id/code when backed by a
 * script, otherwise the raw inline string.
 */
public String getTemplate() {
if (script != null) {
return script.getIdOrCode();
}
return inlineTemplate;
}
/**
 * Check if compilation may be required.
 * If a stored script is used, we cannot tell at this stage, so we always assume
 * that stored scripts require compilation.
 * If an inline script is used, we checked for the mustache opening brackets ("{{").
 */
public boolean mayRequireCompilation() {
return mayRequireCompilation;
}
/**
 * Returns the content type recorded in the script's options, or
 * {@code null} when there is no script, no options, or no media type set.
 */
public XContentType getContentType() {
if (script != null && script.getOptions() != null) {
String mediaType = script.getOptions().get(Script.CONTENT_TYPE_OPTION);
if (mediaType != null) {
return XContentType.fromMediaType(mediaType);
}
}
return null;
}
/** Returns the script type; plain-string templates are treated as inline. */
public ScriptType getType() {
if (script != null) {
return script.getType();
}
return ScriptType.INLINE;
}
/** Returns the script parameters, or {@code null} for plain-string templates. */
public Map<String, Object> getParams() {
return script != null ? script.getParams() : null;
}
/** Equality is based on the backing script and the inline template text. */
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TextTemplate other = (TextTemplate) o;
return Objects.equals(script, other.script)
&& Objects.equals(inlineTemplate, other.inlineTemplate);
}
// Consistent with equals(): hashes the same two fields.
@Override
public int hashCode() {
return Objects.hash(script, inlineTemplate);
}
/**
 * Serializes this template: the full script object when one is present,
 * otherwise the raw inline string as a single value.
 */
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (script == null) {
builder.value(inlineTemplate);
} else {
script.toXContent(builder, params);
}
return builder;
}
/**
 * Parses a template from XContent: a bare string token becomes an inline
 * template, anything else is parsed as a full script object using the
 * default mustache template language.
 *
 * @throws IOException if reading from the parser fails
 */
public static TextTemplate parse(XContentParser parser) throws IOException {
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
return new TextTemplate(parser.text());
} else {
Script template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG);
return new TextTemplate(template);
}
}
}
| TextTemplate |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.