index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/AuthorizationContainer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import org.apache.accumulo.core.data.ByteSequence;
/**
 * An interface for classes that contain a collection of authorizations.
 *
 * <p>Implementations provide membership testing only; see {@code Authorizations} for a concrete
 * implementation that also supports iteration and serialization.
 */
public interface AuthorizationContainer {
/**
 * Checks whether this object contains the given authorization.
 *
 * @param auth authorization, as a string encoded in UTF-8
 * @return true if authorization is in this collection
 */
boolean contains(ByteSequence auth);
}
| 9,700 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static com.google.common.base.Preconditions.checkArgument;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.util.ByteBufferUtil;
/**
 * A collection of authorization strings.
 *
 * <p>Authorizations are stored both as a set of {@link ByteSequence} (for fast membership tests)
 * and as a sorted list of byte arrays (for deterministic serialization). Instances are effectively
 * immutable after construction.
 */
public class Authorizations implements Iterable<byte[]>, Serializable, AuthorizationContainer {
private static final long serialVersionUID = 1L;
// Shared immutable empties, used when constructed with no authorizations.
private static final Set<ByteSequence> EMPTY_AUTH_SET = Collections.emptySet();
private static final List<byte[]> EMPTY_AUTH_LIST = Collections.emptyList();
// Set form for O(1) contains checks; list form holds the same auths in sorted order.
private final Set<ByteSequence> auths;
private final List<byte[]> authsList; // sorted order
/**
 * An empty set of authorizations.
 */
public static final Authorizations EMPTY = new Authorizations();
// Lookup table indexed by unsigned byte value: true when the byte may appear in an auth.
private static final boolean[] validAuthChars = new boolean[256];
/**
 * A special header string used when serializing instances of this class.
 *
 * @see #serialize()
 */
public static final String HEADER = "!AUTH1:";
// Allowed authorization characters: ASCII letters, digits, and '_', '-', ':', '.', '/'.
static {
for (int i = 0; i < 256; i++) {
validAuthChars[i] = false;
}
for (int i = 'a'; i <= 'z'; i++) {
validAuthChars[i] = true;
}
for (int i = 'A'; i <= 'Z'; i++) {
validAuthChars[i] = true;
}
for (int i = '0'; i <= '9'; i++) {
validAuthChars[i] = true;
}
validAuthChars['_'] = true;
validAuthChars['-'] = true;
validAuthChars[':'] = true;
validAuthChars['.'] = true;
validAuthChars['/'] = true;
}
// Returns true if the given byte is a legal authorization character. Masks with 0xff to treat
// the byte as unsigned so the table index is always within 0-255.
static final boolean isValidAuthChar(byte b) {
return validAuthChars[0xff & b];
}
// Validates the auths set (rejecting empty authorizations) and populates authsList with the
// same auths in sorted order. Must be called exactly once after the auths set is filled.
private void checkAuths() {
Set<ByteSequence> sortedAuths = new TreeSet<>(auths);
for (ByteSequence bs : sortedAuths) {
if (bs.length() == 0) {
throw new IllegalArgumentException("Empty authorization");
}
authsList.add(bs.toArray());
}
}
// Returns a mutable set sized for the expected auth count, or the shared immutable empty set
// when no auths are expected (safe because nothing is added to it in that case).
private static Set<ByteSequence> createInternalSet(int size) {
if (size < 1) {
return EMPTY_AUTH_SET;
} else {
return new HashSet<>(size);
}
}
// List counterpart of createInternalSet: mutable when auths are expected, shared empty list
// otherwise.
private static List<byte[]> createInternalList(int size) {
if (size < 1) {
return EMPTY_AUTH_LIST;
} else {
return new ArrayList<>(size);
}
}
/**
 * Constructs an authorization object from a collection of string authorizations that have each
 * already been encoded as UTF-8 bytes. Warning: This method does not verify that each encoded
 * string is valid UTF-8.
 *
 * @param authorizations collection of authorizations, as strings encoded in UTF-8
 * @throws IllegalArgumentException if authorizations is null or contains an empty authorization
 * @see #Authorizations(String...)
 */
public Authorizations(Collection<byte[]> authorizations) {
checkArgument(authorizations != null, "authorizations is null");
this.auths = createInternalSet(authorizations.size());
this.authsList = createInternalList(authorizations.size());
for (byte[] auth : authorizations) {
auths.add(new ArrayByteSequence(auth));
}
checkAuths();
}
/**
 * Constructs an authorization object from a list of string authorizations that have each already
 * been encoded as UTF-8 bytes. Warning: This method does not verify that each encoded string is
 * valid UTF-8.
 *
 * @param authorizations list of authorizations, as strings encoded in UTF-8 and placed in buffers
 * @throws IllegalArgumentException if authorizations is null or contains an empty authorization
 * @see #Authorizations(String...)
 */
public Authorizations(List<ByteBuffer> authorizations) {
checkArgument(authorizations != null, "authorizations is null");
this.auths = createInternalSet(authorizations.size());
this.authsList = createInternalList(authorizations.size());
for (ByteBuffer buffer : authorizations) {
auths.add(new ArrayByteSequence(ByteBufferUtil.toBytes(buffer)));
}
checkAuths();
}
/**
 * Constructs an authorizations object from a serialized form. This is NOT a constructor for a set
 * of authorizations of size one. Warning: This method does not verify that the encoded serialized
 * form is valid UTF-8.
 *
 * <p>Two wire formats are accepted: the current format, which starts with {@link #HEADER} and
 * contains a comma-separated list of Base64-encoded auths, and the legacy format, a plain
 * comma-separated list of auth strings.
 *
 * @param authorizations a serialized authorizations string produced by
 * {@link #getAuthorizationsArray()} or {@link #serialize()}, converted to UTF-8 bytes
 * @throws IllegalArgumentException if authorizations is null
 */
public Authorizations(byte[] authorizations) {
checkArgument(authorizations != null, "authorizations is null");
String authsString = new String(authorizations, UTF_8);
if (authsString.startsWith(HEADER)) {
// it's the new format
authsString = authsString.substring(HEADER.length());
String[] parts = authsString.split(",");
this.auths = createInternalSet(parts.length);
this.authsList = createInternalList(parts.length);
if (!authsString.isEmpty()) {
for (String encAuth : parts) {
byte[] auth = Base64.getDecoder().decode(encAuth.getBytes(UTF_8));
auths.add(new ArrayByteSequence(auth));
}
checkAuths();
}
} else {
// it's the old format
if (authorizations.length > 0) {
String[] parts = authsString.split(",");
this.auths = createInternalSet(parts.length);
this.authsList = createInternalList(parts.length);
setAuthorizations(parts);
} else {
this.auths = EMPTY_AUTH_SET;
this.authsList = EMPTY_AUTH_LIST;
}
}
}
/**
 * Constructs an empty set of authorizations.
 *
 * @see #Authorizations(String...)
 */
public Authorizations() {
this.auths = EMPTY_AUTH_SET;
this.authsList = EMPTY_AUTH_LIST;
}
/**
 * Constructs an authorizations object from a set of human-readable authorizations.
 *
 * @param authorizations array of authorizations
 * @throws IllegalArgumentException if authorizations is null
 */
public Authorizations(String... authorizations) {
checkArgument(authorizations != null, "authorizations is null");
this.auths = createInternalSet(authorizations.length);
this.authsList = createInternalList(authorizations.length);
setAuthorizations(authorizations);
}
// Fills the auths set from the given strings (trimmed, UTF-8 encoded), then validates and
// builds the sorted list via checkAuths().
private void setAuthorizations(String... authorizations) {
checkArgument(authorizations != null, "authorizations is null");
auths.clear();
for (String str : authorizations) {
str = str.trim();
auths.add(new ArrayByteSequence(str.getBytes(UTF_8)));
}
checkAuths();
}
/**
 * Returns a serialized form of these authorizations.
 *
 * @return serialized form of these authorizations, as a string encoded in UTF-8
 * @see #serialize()
 */
public byte[] getAuthorizationsArray() {
return serialize().getBytes(UTF_8);
}
/**
 * Gets the authorizations in sorted order. The returned list is not modifiable.
 *
 * @return authorizations, each as a string encoded in UTF-8
 * @see #Authorizations(Collection)
 */
public List<byte[]> getAuthorizations() {
// Deep-copy each byte array so callers cannot mutate internal state.
ArrayList<byte[]> copy = new ArrayList<>(authsList.size());
for (byte[] auth : authsList) {
byte[] bytes = new byte[auth.length];
System.arraycopy(auth, 0, bytes, 0, auth.length);
copy.add(bytes);
}
return Collections.unmodifiableList(copy);
}
/**
 * Gets the authorizations in sorted order. The returned list is not modifiable.
 *
 * @return authorizations, each as a string encoded in UTF-8 and within a buffer
 */
public List<ByteBuffer> getAuthorizationsBB() {
// Deep-copy into fresh buffers for the same defensive reason as getAuthorizations().
ArrayList<ByteBuffer> copy = new ArrayList<>(authsList.size());
for (byte[] auth : authsList) {
byte[] bytes = new byte[auth.length];
System.arraycopy(auth, 0, bytes, 0, auth.length);
copy.add(ByteBuffer.wrap(bytes));
}
return Collections.unmodifiableList(copy);
}
// Human-readable comma-separated form. Iterates the unordered set, so output order is
// unspecified; use serialize() for a canonical form.
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
String sep = "";
for (ByteSequence auth : auths) {
sb.append(sep);
sep = ",";
sb.append(new String(auth.toArray(), UTF_8));
}
return sb.toString();
}
/**
 * Checks whether this object contains the given authorization.
 *
 * @param auth authorization, as a string encoded in UTF-8
 * @return true if authorization is in this collection
 */
public boolean contains(byte[] auth) {
return auths.contains(new ArrayByteSequence(auth));
}
/**
 * Checks whether this object contains the given authorization. Warning: This method does not
 * verify that the encoded string is valid UTF-8.
 *
 * @param auth authorization, as a string encoded in UTF-8
 * @return true if authorization is in this collection
 */
@Override
public boolean contains(ByteSequence auth) {
return auths.contains(auth);
}
/**
 * Checks whether this object contains the given authorization.
 *
 * @param auth authorization
 * @return true if authorization is in this collection
 */
public boolean contains(String auth) {
return auths.contains(new ArrayByteSequence(auth));
}
// Two Authorizations are equal when they hold the same set of auths.
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (o instanceof Authorizations) {
Authorizations ao = (Authorizations) o;
return auths.equals(ao.auths);
}
return false;
}
// Sum of element hash codes; this matches java.util.Set#hashCode(), keeping equals/hashCode
// consistent with the set comparison in equals().
@Override
public int hashCode() {
int result = 0;
for (ByteSequence b : auths) {
result += b.hashCode();
}
return result;
}
/**
 * Gets the size of this collection of authorizations.
 *
 * @return collection size
 */
public int size() {
return auths.size();
}
/**
 * Checks if this collection of authorizations is empty.
 *
 * @return true if this collection contains no authorizations
 */
public boolean isEmpty() {
return auths.isEmpty();
}
// Iterates defensive copies in sorted order (delegates to getAuthorizations()).
@Override
public Iterator<byte[]> iterator() {
return getAuthorizations().iterator();
}
/**
 * Returns a serialized form of these authorizations. Convert the returned string to UTF-8 bytes
 * to deserialize with {@link #Authorizations(byte[])}.
 *
 * @return serialized form of authorizations: {@link #HEADER} followed by a comma-separated list
 *         of Base64-encoded auths in sorted order
 */
public String serialize() {
StringBuilder sb = new StringBuilder(HEADER);
String sep = "";
for (byte[] auth : authsList) {
sb.append(sep);
sep = ",";
sb.append(Base64.getEncoder().encodeToString(auth));
}
return sb.toString();
}
}
| 9,701 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/lock/ServiceLock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.lock;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.fate.zookeeper.ZooCache.ZcStat;
import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.LockID;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.Watcher.WatcherType;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ServiceLock implements Watcher {
private static final Logger LOG = LoggerFactory.getLogger(ServiceLock.class);
// All candidate lock nodes are named "zlock#<uuid>#<sequence>".
private static final String ZLOCK_PREFIX = "zlock#";
/**
 * Typed wrapper for the per-VM lock node name prefix ("zlock#&lt;uuid&gt;#"); exists only to
 * give the prefix a distinct type and a toString().
 */
private static class Prefix {
private final String prefix;
public Prefix(String prefix) {
this.prefix = prefix;
}
@Override
public String toString() {
return this.prefix;
}
}
/**
 * Typed wrapper around the ZooKeeper path under which lock candidate nodes are created.
 * Instances are obtained via {@link #path(String)}.
 */
public static class ServiceLockPath {
private final String zkPath;
private ServiceLockPath(String zkPath) {
this.zkPath = requireNonNull(zkPath);
}
@Override
public String toString() {
return this.zkPath;
}
}
/**
 * Wraps the given ZooKeeper path string in a {@link ServiceLockPath}.
 *
 * @param zkPath path of the lock's parent node
 * @return typed lock path
 */
public static ServiceLockPath path(String zkPath) {
return new ServiceLockPath(zkPath);
}
/**
 * Reason a held lock was lost: the lock node was deleted out from under us, or the ZooKeeper
 * session expired.
 */
public enum LockLossReason {
LOCK_DELETED, SESSION_EXPIRED
}
/**
 * Callback interface used to notify the lock holder of lock loss.
 */
public interface LockWatcher {
// Invoked when a previously held lock is lost, with the reason.
void lostLock(LockLossReason reason);
/**
 * lost the ability to monitor the lock node, and its status is unknown
 */
void unableToMonitorLockNode(Exception e);
}
/**
 * Extended watcher that is additionally notified of the outcome of an asynchronous lock attempt.
 */
public interface AccumuloLockWatcher extends LockWatcher {
void acquiredLock();
void failedToAcquireLock(Exception e);
}
// Parent path under which candidate/lock ephemeral nodes are created.
private final ServiceLockPath path;
protected final ZooKeeper zooKeeper;
// Per-VM candidate node name prefix: "zlock#<uuid>#".
private final Prefix vmLockPrefix;
// Watcher to notify of lock loss; set when the lock is acquired, null otherwise.
private LockWatcher lockWatcher;
// Name of the ephemeral node for the held lock; null when the lock is not held.
private String lockNodeName;
// True once the lock has been acquired at some point in this object's lifetime.
private volatile boolean lockWasAcquired;
// True while a watch is established on the parent path.
private volatile boolean watchingParent = false;
// Name of the candidate node created by an in-progress lock attempt; null otherwise.
private String createdNodeName;
// Full path of the prior node currently being watched while waiting for the lock.
private String watchingNodeName;
/**
 * Creates a lock object for the given parent path. Establishes an initial watch on the parent
 * node and builds the per-VM candidate node prefix "zlock#&lt;uuid&gt;#".
 *
 * @throws IllegalStateException if the initial watch cannot be established
 */
public ServiceLock(ZooKeeper zookeeper, ServiceLockPath path, UUID uuid) {
this.zooKeeper = requireNonNull(zookeeper);
this.path = requireNonNull(path);
try {
zooKeeper.exists(path.toString(), this);
watchingParent = true;
this.vmLockPrefix = new Prefix(ZLOCK_PREFIX + uuid.toString() + "#");
} catch (KeeperException | InterruptedException ex) {
LOG.error("Error setting initial watch", ex);
throw new IllegalStateException(ex);
}
}
/**
 * Adapter allowing a plain {@link LockWatcher} to be used where an {@link AccumuloLockWatcher}
 * is required. Records whether the lock was acquired and forwards loss notifications to the
 * wrapped watcher.
 */
private static class LockWatcherWrapper implements AccumuloLockWatcher {
boolean acquiredLock = false;
LockWatcher delegate;
public LockWatcherWrapper(LockWatcher watcher) {
this.delegate = watcher;
}
@Override
public void acquiredLock() {
acquiredLock = true;
}
@Override
public void failedToAcquireLock(Exception e) {
LOG.debug("Failed to acquire lock", e);
}
@Override
public void lostLock(LockLossReason reason) {
delegate.lostLock(reason);
}
@Override
public void unableToMonitorLockNode(Exception e) {
delegate.unableToMonitorLockNode(e);
}
}
/**
 * Attempts to acquire the lock synchronously via {@link #lock}.
 *
 * @return true if the lock was acquired; on failure any candidate node that was created is
 *         recursively deleted and false is returned
 */
public synchronized boolean tryLock(LockWatcher lw, ServiceLockData lockData)
throws KeeperException, InterruptedException {
LockWatcherWrapper lww = new LockWatcherWrapper(lw);
lock(lww, lockData);
if (lww.acquiredLock) {
return true;
}
// If we didn't acquire the lock, then delete the path we just created
if (createdNodeName != null) {
String pathToDelete = path + "/" + createdNodeName;
LOG.debug("[{}] Failed to acquire lock in tryLock(), deleting all at path: {}", vmLockPrefix,
pathToDelete);
ZooUtil.recursiveDelete(zooKeeper, pathToDelete, NodeMissingPolicy.SKIP);
createdNodeName = null;
}
return false;
}
/**
 * Sort list of ephemeral nodes by their sequence number. Any ephemeral nodes that are not of the
 * correct form (zlock#&lt;UUID&gt;#&lt;10-digit sequence&gt;) are discarded with a warning.
 *
 * @param path parent lock path (used only for logging)
 * @param children list of ephemeral nodes
 * @return list of ephemeral nodes that have valid formats, sorted by sequence number
 */
public static List<String> validateAndSort(ServiceLockPath path, List<String> children) {
LOG.trace("validating and sorting children at path {}", path);
List<String> validChildren = new ArrayList<>();
if (children == null || children.isEmpty()) {
return validChildren;
}
children.forEach(c -> {
LOG.trace("Validating {}", c);
if (c.startsWith(ZLOCK_PREFIX)) {
// Strip the "zlock#" prefix; the remainder must be "<UUID>#<sequence>".
// BUGFIX: previously skipped one extra character (prefix length + 1), chopping the
// first character off the UUID.
String candidate = c.substring(ZLOCK_PREFIX.length());
if (candidate.contains("#")) {
int idx = candidate.indexOf('#');
// BUGFIX: previously substring(0, idx - 1) also dropped the last UUID character;
// together these caused every well-formed child to fail UUID parsing and be rejected.
String uuid = candidate.substring(0, idx);
String sequenceNum = candidate.substring(idx + 1);
try {
LOG.trace("Testing uuid format of {}", uuid);
UUID.fromString(uuid);
if (sequenceNum.length() == 10) {
try {
LOG.trace("Testing number format of {}", sequenceNum);
Integer.parseInt(sequenceNum);
validChildren.add(c);
} catch (NumberFormatException e) {
LOG.warn("Child found with invalid sequence format: {} (not a number)", c);
}
} else {
LOG.warn("Child found with invalid sequence format: {} (not 10 characters)", c);
}
} catch (IllegalArgumentException e) {
LOG.warn("Child found with invalid UUID format: {}", c);
}
} else {
LOG.warn("Child found with invalid format: {} (does not contain second '#')", c);
}
} else {
LOG.warn("Child found with invalid format: {} (does not start with {})", c, ZLOCK_PREFIX);
}
});
if (validChildren.size() > 1) {
validChildren.sort((o1, o2) -> {
// Lock should be of the form:
// zlock#UUID#sequenceNumber
// Example:
// zlock#44755fbe-1c9e-40b3-8458-03abaf950d7e#0000000000
int secondHashIdx = 43;
return Integer.valueOf(o1.substring(secondHashIdx))
.compareTo(Integer.valueOf(o2.substring(secondHashIdx)));
});
}
LOG.trace("Children nodes (size: {}): {}", validChildren.size(), validChildren);
return validChildren;
}
/**
 * Given a pre-sorted list of children ephemeral nodes of the form "zlock#UUID#sequenceNumber",
 * locate the node that sorts immediately before {@code ephemeralNode} and return the entry with
 * the lowest sequence number that shares that node's "zlock#UUID" prefix.
 *
 * @param children list of sequential ephemeral nodes, already sorted
 * @param ephemeralNode starting node for the search
 * @return prior prefix's entry with the lowest sequence number
 */
public static String findLowestPrevPrefix(final List<String> children,
final String ephemeralNode) {
final int idx = children.indexOf(ephemeralNode);
// The immediately preceding child determines the prefix we scan for.
final String prior = children.get(idx - 1);
final String priorPrefix = prior.substring(0, prior.lastIndexOf('#'));
String lowest = prior;
// Walk backwards while entries still carry the same "zlock#UUID" prefix.
for (int pos = idx - 2; pos >= 0; pos--) {
final String candidate = children.get(pos);
if (!candidate.startsWith(priorPrefix)) {
break;
}
lowest = candidate;
}
return lowest;
}
/**
 * Decides whether the candidate ephemeral node we created now owns the lock. If the candidate
 * is the lowest-sequence child, the lock is acquired; otherwise a watch is placed on the lowest
 * node of the immediately prior prefix and this method is re-invoked when that node disappears.
 *
 * @param createdEphemeralNode name of the candidate node created by this lock attempt
 * @param lw watcher to notify of acquisition or failure
 */
private synchronized void determineLockOwnership(final String createdEphemeralNode,
final AccumuloLockWatcher lw) throws KeeperException, InterruptedException {
if (createdNodeName == null) {
throw new IllegalStateException(
"Called determineLockOwnership() when ephemeralNodeName == null");
}
// Re-read and validate the children; our candidate must still be present.
List<String> children = validateAndSort(path, zooKeeper.getChildren(path.toString(), null));
if (null == children || !children.contains(createdEphemeralNode)) {
LOG.error("Expected ephemeral node {} to be in the list of children {}", createdEphemeralNode,
children);
throw new IllegalStateException(
"Lock attempt ephemeral node no longer exist " + createdEphemeralNode);
}
if (children.get(0).equals(createdEphemeralNode)) {
LOG.debug("[{}] First candidate is my lock, acquiring...", vmLockPrefix);
if (!watchingParent) {
throw new IllegalStateException(
"Can not acquire lock, no longer watching parent : " + path);
}
this.lockWatcher = lw;
this.lockNodeName = createdEphemeralNode;
createdNodeName = null;
lockWasAcquired = true;
lw.acquiredLock();
} else {
LOG.debug("[{}] Lock held by another process with ephemeral node: {}", vmLockPrefix,
children.get(0));
// Watch the lowest node of the prior prefix; when it is deleted we try again.
String lowestPrevNode = findLowestPrevPrefix(children, createdEphemeralNode);
watchingNodeName = path + "/" + lowestPrevNode;
final String nodeToWatch = watchingNodeName;
LOG.debug("[{}] Establishing watch on prior node {}", vmLockPrefix, nodeToWatch);
Watcher priorNodeWatcher = new Watcher() {
@Override
public void process(WatchedEvent event) {
if (LOG.isTraceEnabled()) {
LOG.trace("[{}] Processing {}", vmLockPrefix, event);
}
boolean renew = true;
// Prior node was deleted: re-check ownership if our attempt is still in progress.
if (event.getType() == EventType.NodeDeleted && event.getPath().equals(nodeToWatch)) {
LOG.debug("[{}] Detected deletion of prior node {}, attempting to acquire lock; {}",
vmLockPrefix, nodeToWatch, event);
synchronized (ServiceLock.this) {
try {
if (createdNodeName != null) {
determineLockOwnership(createdEphemeralNode, lw);
} else if (LOG.isDebugEnabled()) {
LOG.debug("[{}] While waiting for another lock {}, {} was deleted; {}",
vmLockPrefix, nodeToWatch, createdEphemeralNode, event);
}
} catch (Exception e) {
if (lockNodeName == null) {
// have not acquired lock yet
lw.failedToAcquireLock(e);
}
}
}
renew = false;
}
// Session problems while still waiting mean the attempt has failed.
if (event.getState() == KeeperState.Expired
|| event.getState() == KeeperState.Disconnected) {
synchronized (ServiceLock.this) {
if (lockNodeName == null) {
LOG.info("Zookeeper Session expired / disconnected; {}", event);
lw.failedToAcquireLock(
new Exception("Zookeeper Session expired / disconnected; " + event));
}
}
renew = false;
}
// Otherwise re-establish the watch on the prior node.
if (renew) {
try {
Stat restat = zooKeeper.exists(nodeToWatch, this);
if (restat == null) {
// if stat is null from the zookeeper.exists(path, Watcher) call, then we just
// created a Watcher on a node that does not exist. Delete the watcher we just
// created.
zooKeeper.removeWatches(nodeToWatch, this, WatcherType.Any, true);
determineLockOwnership(createdEphemeralNode, lw);
} else {
LOG.debug("[{}] Renewed watch on prior node {}", vmLockPrefix, nodeToWatch);
}
} catch (KeeperException | InterruptedException e) {
lw.failedToAcquireLock(
new Exception("Failed to renew watch on other manager node", e));
}
}
}
};
Stat stat = zooKeeper.exists(nodeToWatch, priorNodeWatcher);
if (stat == null) {
// if stat is null from the zookeeper.exists(path, Watcher) call, then we just
// created a Watcher on a node that does not exist. Delete the watcher we just created.
zooKeeper.removeWatches(nodeToWatch, priorNodeWatcher, WatcherType.Any, true);
determineLockOwnership(createdEphemeralNode, lw);
}
}
}
/**
 * Clears the held-lock state and notifies the registered watcher that the lock was lost.
 */
private void lostLock(LockLossReason reason) {
// Capture the watcher before clearing the fields, then notify it last.
final LockWatcher watcher = lockWatcher;
lockNodeName = null;
lockWatcher = null;
watcher.lostLock(reason);
}
/**
 * Asynchronously attempts to acquire the lock, notifying the given watcher of the outcome.
 * Follows the ZooKeeper lock recipe, with cleanup of duplicate candidate nodes that client
 * retries can leave behind.
 *
 * @throws IllegalStateException if a lock attempt is already in progress or the lock is held
 */
public synchronized void lock(final AccumuloLockWatcher lw, ServiceLockData lockData) {
if (lockWatcher != null || lockNodeName != null || createdNodeName != null) {
throw new IllegalStateException();
}
lockWasAcquired = false;
try {
final String lockPathPrefix = path + "/" + vmLockPrefix.toString();
// Implement recipe at https://zookeeper.apache.org/doc/current/recipes.html#sc_recipes_Locks
// except that instead of the ephemeral lock node being of the form guid-lock- use lock-guid-.
// Another deviation from the recipe is that we cleanup any extraneous ephemeral nodes that
// were created.
final String createPath = zooKeeper.create(lockPathPrefix, lockData.serialize(),
ZooUtil.PUBLIC, CreateMode.EPHEMERAL_SEQUENTIAL);
LOG.debug("[{}] Ephemeral node {} created with data: {}", vmLockPrefix, createPath, lockData);
// It's possible that the call above was retried several times and multiple ephemeral nodes
// were created but the client missed the response for some reason. Find the ephemeral nodes
// with this ZLOCK_UUID and lowest sequential number.
List<String> children = validateAndSort(path, zooKeeper.getChildren(path.toString(), null));
if (null == children
|| !children.contains(createPath.substring(path.toString().length() + 1))) {
LOG.error("Expected ephemeral node {} to be in the list of children {}", createPath,
children);
throw new IllegalStateException(
"Lock attempt ephemeral node no longer exist " + createPath);
}
String lowestSequentialPath = null;
boolean msgLoggedOnce = false;
// Keep the lowest-sequence node we created; delete any duplicates from retries.
for (String child : children) {
if (child.startsWith(vmLockPrefix.toString())) {
if (null == lowestSequentialPath) {
if (createPath.equals(path + "/" + child)) {
// the path returned from create is the lowest sequential one
lowestSequentialPath = createPath;
break;
}
lowestSequentialPath = path + "/" + child;
LOG.debug("[{}] lowest sequential node found: {}", vmLockPrefix, lowestSequentialPath);
} else {
if (!msgLoggedOnce) {
LOG.info(
"[{}] Zookeeper client missed server response, multiple ephemeral child nodes created at {}",
vmLockPrefix, lockPathPrefix);
msgLoggedOnce = true;
}
LOG.debug("[{}] higher sequential node found: {}, deleting it", vmLockPrefix, child);
try {
zooKeeper.delete(path + "/" + child, -1);
} catch (KeeperException e) {
// ignore the case where the node doesn't exist
if (e.code() != Code.NONODE) {
throw e;
}
}
}
}
}
final String pathForWatcher = lowestSequentialPath;
// Set a watcher on the lowest sequential node that we created, this handles the case
// where the node we created is deleted or if this client becomes disconnected.
LOG.debug("[{}] Setting watcher on {}", vmLockPrefix, pathForWatcher);
Watcher watcherForNodeWeCreated = new Watcher() {
// Reports failure and clears the candidate node name when our node vanishes pre-acquire.
private void failedToAcquireLock() {
LOG.debug("[{}] Lock deleted before acquired, setting createdNodeName {} to null",
vmLockPrefix, createdNodeName);
lw.failedToAcquireLock(new Exception("Lock deleted before acquired"));
createdNodeName = null;
}
@Override
public void process(WatchedEvent event) {
synchronized (ServiceLock.this) {
if (lockNodeName != null && event.getType() == EventType.NodeDeleted
&& event.getPath().equals(path + "/" + lockNodeName)) {
LOG.debug("[{}] {} was deleted; {}", vmLockPrefix, lockNodeName, event);
lostLock(LockLossReason.LOCK_DELETED);
} else if (createdNodeName != null && event.getType() == EventType.NodeDeleted
&& event.getPath().equals(path + "/" + createdNodeName)) {
LOG.debug("[{}] {} was deleted; {}", vmLockPrefix, createdNodeName, event);
failedToAcquireLock();
} else if (event.getState() != KeeperState.Disconnected
&& event.getState() != KeeperState.Expired
&& (lockNodeName != null || createdNodeName != null)) {
LOG.debug("Unexpected event watching lock node {}; {}", pathForWatcher, event);
try {
Stat stat2 = zooKeeper.exists(pathForWatcher, this);
if (stat2 == null) {
// if stat is null from the zookeeper.exists(path, Watcher) call, then we just
// created a Watcher on a node that does not exist. Delete the watcher we just
// created.
zooKeeper.removeWatches(pathForWatcher, this, WatcherType.Any, true);
if (lockNodeName != null) {
lostLock(LockLossReason.LOCK_DELETED);
} else if (createdNodeName != null) {
failedToAcquireLock();
}
}
} catch (Exception e) {
lockWatcher.unableToMonitorLockNode(e);
LOG.error("Failed to stat lock node: {}; {}", pathForWatcher, event, e);
}
}
}
}
};
Stat stat = zooKeeper.exists(pathForWatcher, watcherForNodeWeCreated);
if (stat == null) {
// if stat is null from the zookeeper.exists(path, Watcher) call, then we just
// created a Watcher on a node that does not exist. Delete the watcher we just created.
zooKeeper.removeWatches(pathForWatcher, watcherForNodeWeCreated, WatcherType.Any, true);
lw.failedToAcquireLock(new Exception("Lock does not exist after create"));
return;
}
createdNodeName = pathForWatcher.substring(path.toString().length() + 1);
// We have created a node, do we own the lock?
determineLockOwnership(createdNodeName, lw);
} catch (KeeperException | InterruptedException e) {
lw.failedToAcquireLock(e);
}
}
/**
 * Cancels an in-progress asynchronous lock attempt and/or releases a held lock.
 *
 * @return true if a candidate node was deleted or the lock was released, false otherwise
 */
public synchronized boolean tryToCancelAsyncLockOrUnlock()
throws InterruptedException, KeeperException {
boolean deletedSomething = false;
// First remove any candidate node from an unfinished lock attempt.
if (createdNodeName != null) {
final String candidatePath = path + "/" + createdNodeName;
LOG.debug("[{}] Deleting all at path {} due to lock cancellation", vmLockPrefix,
candidatePath);
ZooUtil.recursiveDelete(zooKeeper, candidatePath, NodeMissingPolicy.SKIP);
deletedSomething = true;
}
// Then release the lock if it is actually held.
if (lockNodeName != null) {
unlock();
deletedSomething = true;
}
return deletedSomething;
}
/**
 * Releases the held lock by recursively deleting its node, then notifies the watcher.
 *
 * @throws IllegalStateException if the lock is not currently held
 */
public synchronized void unlock() throws InterruptedException, KeeperException {
if (lockNodeName == null) {
throw new IllegalStateException();
}
// Snapshot the state, clear the fields, then perform the deletion and notification.
final LockWatcher watcher = lockWatcher;
final String heldNode = lockNodeName;
lockNodeName = null;
lockWatcher = null;
final String pathToDelete = path + "/" + heldNode;
LOG.debug("[{}] Deleting all at path {} due to unlock", vmLockPrefix, pathToDelete);
ZooUtil.recursiveDelete(zooKeeper, pathToDelete, NodeMissingPolicy.SKIP);
watcher.lostLock(LockLossReason.LOCK_DELETED);
}
/**
 * @return path of node that this lock is watching while waiting for the lock; may be null
 */
public synchronized String getWatching() {
return watchingNodeName;
}
/**
 * @return full ZooKeeper path of the held lock node, or null if the lock is not held
 */
public synchronized String getLockPath() {
return lockNodeName == null ? null : path + "/" + lockNodeName;
}
/**
 * @return name of the held lock node, or null when the lock is not held
 */
public synchronized String getLockName() {
return lockNodeName;
}
/**
 * @return identifier of the held lock (parent path, node name, and session id)
 * @throws IllegalStateException when the lock is not held
 */
public synchronized LockID getLockID() {
if (lockNodeName == null) {
throw new IllegalStateException("Lock not held");
}
return new LockID(path.toString(), lockNodeName, zooKeeper.getSessionId());
}
/**
 * indicates if the lock was acquired in the past.... helps discriminate between the case where
 * the lock was never held, or held and lost....
 *
 * @return true if the lock was acquired, otherwise false.
 */
public synchronized boolean wasLockAcquired() {
return lockWasAcquired;
}
/**
 * @return true if this object currently holds the lock
 */
public synchronized boolean isLocked() {
return lockNodeName != null;
}
/**
 * Overwrites the data stored in the held lock node. Does nothing when the lock is not held.
 */
public synchronized void replaceLockData(ServiceLockData lockData)
throws KeeperException, InterruptedException {
// Synchronized, so the lock path cannot change between the check and the write.
final String lockPath = getLockPath();
if (lockPath == null) {
return;
}
zooKeeper.setData(lockPath, lockData.serialize(), -1);
LOG.debug("[{}] Lock data replaced at path {} with data: {}", vmLockPrefix, lockPath,
lockData);
}
/**
 * Watcher callback for the parent path. If the session expired while the lock was held, reports
 * the loss; otherwise attempts to re-establish the watch on the parent node.
 */
@Override
public synchronized void process(WatchedEvent event) {
if (LOG.isDebugEnabled()) {
LOG.debug("{}", event);
}
watchingParent = false;
if (event.getState() == KeeperState.Expired && lockNodeName != null) {
lostLock(LockLossReason.SESSION_EXPIRED);
} else {
try { // set the watch on the parent node again
zooKeeper.exists(path.toString(), this);
watchingParent = true;
} catch (KeeperException.ConnectionLossException ex) {
// we can't look at the lock because we aren't connected, but our session is still good
LOG.warn("lost connection to zookeeper", ex);
} catch (Exception ex) {
// Only surface the failure when a lock attempt or held lock could be affected.
if (lockNodeName != null || createdNodeName != null) {
lockWatcher.unableToMonitorLockNode(ex);
LOG.error("Error resetting watch on ZooLock {} {}",
lockNodeName != null ? lockNodeName : createdNodeName, event, ex);
}
}
}
}
/**
 * Determines, via a ZooCache, whether the given lock id currently holds the lock.
 *
 * @return true if the lowest-sequence child matches the id's node and its ephemeral owner is
 *         the id's session
 */
public static boolean isLockHeld(ZooCache zc, LockID lid) {
final var lockPath = path(lid.path);
final List<String> candidates = validateAndSort(lockPath, zc.getChildren(lockPath.toString()));
if (candidates == null || candidates.isEmpty()) {
return false;
}
// The first (lowest-sequence) child is the current lock holder.
if (!lid.node.equals(candidates.get(0))) {
return false;
}
final ZcStat stat = new ZcStat();
final byte[] data = zc.get(lid.path + "/" + lid.node, stat);
return data != null && stat.getEphemeralOwner() == lid.eid;
}
/**
 * Reads the current lock holder's data directly from ZooKeeper.
 *
 * @return the parsed lock data, or empty if no candidate lock nodes exist or the data is blank
 */
public static Optional<ServiceLockData> getLockData(ZooKeeper zk, ServiceLockPath path)
    throws KeeperException, InterruptedException {
  List<String> children = validateAndSort(path, zk.getChildren(path.toString(), null));
  if (children == null || children.isEmpty()) {
    return Optional.empty();
  }
  String lockNode = children.get(0);
  // NOTE(review): unlike the ZooCache overload, this method does not verify that lockNode starts
  // with ZLOCK_PREFIX -- confirm whether validateAndSort already guarantees that.
  byte[] data = zk.getData(path + "/" + lockNode, false, null);
  if (data == null) {
    data = new byte[0];
  }
  return ServiceLockData.parse(data);
}
/**
 * Reads the current lock holder's data via the supplied ZooCache, also populating {@code stat}
 * with the cached node status.
 *
 * @return the parsed lock data, or empty if no candidate lock nodes exist or the data is blank
 * @throws IllegalStateException if the head candidate node is not a lock node
 */
public static Optional<ServiceLockData> getLockData(
    org.apache.accumulo.core.fate.zookeeper.ZooCache zc, ServiceLockPath path, ZcStat stat) {
  List<String> children = validateAndSort(path, zc.getChildren(path.toString()));
  if (children == null || children.isEmpty()) {
    return Optional.empty();
  }
  String lockNode = children.get(0);
  if (!lockNode.startsWith(ZLOCK_PREFIX)) {
    throw new IllegalStateException("Node " + lockNode + " at " + path + " is not a lock node");
  }
  byte[] data = zc.get(path + "/" + lockNode, stat);
  if (data == null) {
    data = new byte[0];
  }
  return ServiceLockData.parse(data);
}
/**
 * Returns the ZooKeeper session id of the current lock holder, or 0 when the lock is not held.
 */
public static long getSessionId(ZooCache zc, ServiceLockPath path) {
  List<String> sorted = validateAndSort(path, zc.getChildren(path.toString()));
  if (sorted == null || sorted.isEmpty()) {
    return 0;
  }
  ZcStat stat = new ZcStat();
  byte[] data = zc.get(path + "/" + sorted.get(0), stat);
  return data == null ? 0 : stat.getEphemeralOwner();
}
/**
 * Returns the ZooKeeper session id of the current lock holder, or 0 when the lock is not held.
 *
 * <p>
 * Fix: previously this method called {@code children.get(0)} without checking for an empty list,
 * throwing IndexOutOfBoundsException when no lock candidates existed; the static overload returns
 * 0 in that case, and this now does the same.
 */
public long getSessionId() throws KeeperException, InterruptedException {
  List<String> children = validateAndSort(path, zooKeeper.getChildren(path.toString(), null));
  if (children == null || children.isEmpty()) {
    return 0; // no lock candidates, so no session holds the lock
  }
  String lockNode = children.get(0);
  Stat stat = zooKeeper.exists(path + "/" + lockNode, null);
  return stat == null ? 0 : stat.getEphemeralOwner();
}
/**
 * Unconditionally deletes the current lock node (and everything beneath it) at the given path.
 *
 * @throws IllegalStateException if no lock candidates exist or the head node is not a lock node
 */
public static void deleteLock(ZooReaderWriter zk, ServiceLockPath path)
    throws InterruptedException, KeeperException {
  List<String> candidates = validateAndSort(path, zk.getChildren(path.toString()));
  if (candidates == null || candidates.isEmpty()) {
    throw new IllegalStateException("No lock is held at " + path);
  }
  String head = candidates.get(0);
  if (!head.startsWith(ZLOCK_PREFIX)) {
    throw new IllegalStateException("Node " + head + " at " + path + " is not a lock node");
  }
  String target = path + "/" + head;
  LOG.debug("Deleting all at path {} due to lock deletion", target);
  zk.recursiveDelete(target, NodeMissingPolicy.SKIP);
}
/**
 * Deletes the current lock node only if its stored data matches {@code lockData}.
 *
 * @return true if the lock node was deleted, false if its data did not match
 * @throws IllegalStateException if no lock candidates exist or the head node is not a lock node
 */
public static boolean deleteLock(ZooReaderWriter zk, ServiceLockPath path, String lockData)
    throws InterruptedException, KeeperException {
  List<String> candidates = validateAndSort(path, zk.getChildren(path.toString()));
  if (candidates == null || candidates.isEmpty()) {
    throw new IllegalStateException("No lock is held at " + path);
  }
  String head = candidates.get(0);
  if (!head.startsWith(ZLOCK_PREFIX)) {
    throw new IllegalStateException("Node " + head + " at " + path + " is not a lock node");
  }
  byte[] data = zk.getData(path + "/" + head);
  if (!lockData.equals(new String(data, UTF_8))) {
    return false;
  }
  String target = path + "/" + head;
  LOG.debug("Deleting all at path {} due to lock deletion", target);
  zk.recursiveDelete(target, NodeMissingPolicy.FAIL);
  return true;
}
}
| 9,702 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.lock;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import static org.apache.accumulo.core.util.LazySingletons.GSON;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashSet;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import org.apache.accumulo.core.util.AddressUtil;
import com.google.common.net.HostAndPort;
/**
 * The data stored in a service lock node, serialized as JSON. It maps each Thrift service
 * advertised by a server process to a {@link ServiceDescriptor} containing the process UUID, the
 * service address, and the service group. Equality, hashing, and ordering are all defined on the
 * serialized JSON form.
 *
 * <p>
 * Improvements: the {@code services} field is now {@code final} (it is only assigned during
 * construction), and the redundant {@code static} modifier was removed from the nested enum
 * (nested enums are implicitly static).
 */
public class ServiceLockData implements Comparable<ServiceLockData> {

  /**
   * Thrift Service list
   */
  public enum ThriftService {
    CLIENT,
    COORDINATOR,
    COMPACTOR,
    FATE,
    GC,
    MANAGER,
    NONE,
    TABLET_INGEST,
    TABLET_MANAGEMENT,
    TABLET_SCAN,
    TSERV
  }

  /**
   * An object that describes a process, the group assigned to that process, the Thrift service and
   * the address to use to communicate with that service.
   */
  public static class ServiceDescriptor {

    /**
     * The group name that will be used when one is not specified.
     */
    public static final String DEFAULT_GROUP_NAME = "default";

    private final UUID uuid;
    private final ThriftService service;
    private final String address;
    private final String group;

    public ServiceDescriptor(UUID uuid, ThriftService service, String address) {
      this(uuid, service, address, DEFAULT_GROUP_NAME);
    }

    public ServiceDescriptor(UUID uuid, ThriftService service, String address, String group) {
      this.uuid = requireNonNull(uuid);
      this.service = requireNonNull(service);
      this.address = requireNonNull(address);
      this.group = requireNonNull(group);
    }

    public UUID getUUID() {
      return uuid;
    }

    public ThriftService getService() {
      return service;
    }

    public String getAddress() {
      return address;
    }

    public String getGroup() {
      return group;
    }

    // equality and hashing delegate to the serialized JSON form so that two descriptors with
    // identical content always compare equal
    @Override
    public int hashCode() {
      return toString().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (obj == null) {
        return false;
      }
      if (getClass() != obj.getClass()) {
        return false;
      }
      ServiceDescriptor other = (ServiceDescriptor) obj;
      return toString().equals(other.toString());
    }

    @Override
    public String toString() {
      return GSON.get().toJson(this);
    }
  }

  /**
   * A set of ServiceDescriptor's
   */
  public static class ServiceDescriptors {
    private final Set<ServiceDescriptor> descriptors;

    public ServiceDescriptors() {
      descriptors = new HashSet<>();
    }

    public ServiceDescriptors(HashSet<ServiceDescriptor> descriptors) {
      this.descriptors = descriptors;
    }

    public void addService(ServiceDescriptor sd) {
      this.descriptors.add(sd);
    }

    public Set<ServiceDescriptor> getServices() {
      return descriptors;
    }
  }

  // one descriptor per advertised Thrift service; only populated during construction
  private final EnumMap<ThriftService,ServiceDescriptor> services;

  public ServiceLockData(ServiceDescriptors sds) {
    this.services = new EnumMap<>(ThriftService.class);
    sds.getServices().forEach(sd -> this.services.put(sd.getService(), sd));
  }

  public ServiceLockData(UUID uuid, String address, ThriftService service, String group) {
    this(new ServiceDescriptors(new HashSet<>(
        Collections.singleton(new ServiceDescriptor(uuid, service, address, group)))));
  }

  public ServiceLockData(UUID uuid, String address, ThriftService service) {
    this(new ServiceDescriptors(
        new HashSet<>(Collections.singleton(new ServiceDescriptor(uuid, service, address)))));
  }

  /** Returns the advertised address string for the given service, or null if not advertised. */
  public String getAddressString(ThriftService service) {
    ServiceDescriptor sd = services.get(service);
    return sd == null ? null : sd.getAddress();
  }

  /** Returns the parsed address for the given service, or null if not advertised. */
  public HostAndPort getAddress(ThriftService service) {
    String s = getAddressString(service);
    return s == null ? null : AddressUtil.parseAddress(s, false);
  }

  /** Returns the group for the given service, or null if not advertised. */
  public String getGroup(ThriftService service) {
    ServiceDescriptor sd = services.get(service);
    return sd == null ? null : sd.getGroup();
  }

  /** Returns the process UUID for the given service, or null if not advertised. */
  public UUID getServerUUID(ThriftService service) {
    ServiceDescriptor sd = services.get(service);
    return sd == null ? null : sd.getUUID();
  }

  /** Serializes this lock data to its JSON form as UTF-8 bytes. */
  public byte[] serialize() {
    ServiceDescriptors sd = new ServiceDescriptors();
    services.values().forEach(s -> sd.addService(s));
    return GSON.get().toJson(sd).getBytes(UTF_8);
  }

  @Override
  public String toString() {
    return new String(serialize(), UTF_8);
  }

  @Override
  public int hashCode() {
    return toString().hashCode();
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof ServiceLockData ? Objects.equals(toString(), o.toString()) : false;
  }

  @Override
  public int compareTo(ServiceLockData other) {
    return toString().compareTo(other.toString());
  }

  /**
   * Parses serialized lock data.
   *
   * @return the parsed data, or empty if {@code lockData} is null or blank
   */
  public static Optional<ServiceLockData> parse(byte[] lockData) {
    if (lockData == null) {
      return Optional.empty();
    }
    String data = new String(lockData, UTF_8);
    return data.isBlank() ? Optional.empty()
        : Optional.of(new ServiceLockData(GSON.get().fromJson(data, ServiceDescriptors.class)));
  }
}
| 9,703 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import java.util.Map;
/**
 * Options accepted by the shell's compact command. Each setting knows how to validate and convert
 * its raw string value (via its {@link Type}) and whether the converted value is destined for the
 * compaction selector or the compaction configurer.
 *
 * <p>
 * Improvement: both fields are now {@code final}, matching the enum's immutable intent.
 */
public enum CompactionSettings {

  SF_NO_SUMMARY(new NullType(), true),
  SF_EXTRA_SUMMARY(new NullType(), true),
  SF_NO_SAMPLE(new NullType(), true),
  SF_GT_ESIZE_OPT(new SizeType(), true),
  SF_LT_ESIZE_OPT(new SizeType(), true),
  SF_NAME_RE_OPT(new PatternType(), true),
  SF_PATH_RE_OPT(new PatternType(), true),
  MIN_FILES_OPT(new UIntType(), true),
  OUTPUT_COMPRESSION_OPT(new StringType(), false),
  OUTPUT_BLOCK_SIZE_OPT(new SizeType(), false),
  OUTPUT_HDFS_BLOCK_SIZE_OPT(new SizeType(), false),
  OUTPUT_INDEX_BLOCK_SIZE_OPT(new SizeType(), false),
  OUTPUT_REPLICATION_OPT(new UIntType(), false);

  // validates and canonicalizes the raw option value
  private final Type type;
  // true when the option configures file selection, false when it configures output
  private final boolean selectorOpt;

  private CompactionSettings(Type type, boolean selectorOpt) {
    this.type = type;
    this.selectorOpt = selectorOpt;
  }

  /**
   * Converts {@code val} and stores it under this setting's name in the selector or configurer
   * map, depending on which side of compaction this setting applies to.
   */
  public void put(Map<String,String> selectorOpts, Map<String,String> configurerOpts, String val) {
    Map<String,String> target = selectorOpt ? selectorOpts : configurerOpts;
    target.put(name(), type.convert(val));
  }
}
| 9,704 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/Type.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
/**
 * Validates and converts the raw string value of a compaction setting into the canonical string
 * form that is passed along as an option. Implementations throw IllegalArgumentException (or a
 * parse exception) when the value is not acceptable.
 */
interface Type {
  String convert(String str);
}
| 9,705 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/NullType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import static com.google.common.base.Preconditions.checkArgument;
/**
 * Option type for flag-style settings that carry no value: the input must be null and the
 * canonical form is the empty string.
 */
public class NullType implements Type {
  @Override
  public String convert(String str) {
    if (str != null) {
      // same contract as Preconditions.checkArgument(str == null)
      throw new IllegalArgumentException();
    }
    return "";
  }
}
| 9,706 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import static com.google.common.base.Preconditions.checkArgument;
/**
 * Option type for strictly positive integer settings; the raw string is returned unchanged after
 * validation.
 */
class UIntType implements Type {
  @Override
  public String convert(String str) {
    int value = Integer.parseInt(str);
    if (value <= 0) {
      // same contract as Preconditions.checkArgument(value > 0)
      throw new IllegalArgumentException();
    }
    return str;
  }
}
| 9,707 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/ShellCompactCommandConfigurer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
import org.apache.accumulo.core.conf.Property;
/**
* The compaction configurer is used by the shell compact command. It exists in accumulo-core, so it
* is on the class path for the shell and servers that run compactions.
*/
public class ShellCompactCommandConfigurer implements CompactionConfigurer {

  // table file property overrides to apply to compaction output
  private Map<String,String> overrides = new HashMap<>();

  /**
   * Translates the shell's output-related compaction settings into the corresponding table file
   * property overrides.
   *
   * @throws IllegalArgumentException if an option is not an output setting
   */
  @Override
  public void init(InitParameters iparams) {
    Set<Entry<String,String>> es = iparams.getOptions().entrySet();
    for (Entry<String,String> entry : es) {
      switch (CompactionSettings.valueOf(entry.getKey())) {
        case OUTPUT_COMPRESSION_OPT:
          overrides.put(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), entry.getValue());
          break;
        case OUTPUT_BLOCK_SIZE_OPT:
          overrides.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), entry.getValue());
          break;
        case OUTPUT_INDEX_BLOCK_SIZE_OPT:
          overrides.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), entry.getValue());
          break;
        case OUTPUT_HDFS_BLOCK_SIZE_OPT:
          overrides.put(Property.TABLE_FILE_BLOCK_SIZE.getKey(), entry.getValue());
          break;
        case OUTPUT_REPLICATION_OPT:
          overrides.put(Property.TABLE_FILE_REPLICATION.getKey(), entry.getValue());
          break;
        default:
          throw new IllegalArgumentException("Unknown option " + entry.getKey());
      }
    }
  }

  /** Returns the same property overrides for every compaction. */
  @Override
  public Overrides override(InputParameters params) {
    return new Overrides(overrides);
  }
}
| 9,708 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
/** Option type that accepts any value; passes the string through unchanged. */
class StringType implements Type {
  @Override
  public String convert(String str) {
    return str;
  }
}
| 9,709 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import java.util.regex.Pattern;
/**
 * Option type for regular-expression settings; validates that the value compiles and returns it
 * unchanged.
 */
class PatternType implements Type {
  @Override
  public String convert(String str) {
    // ensure it compiles
    Pattern.compile(str);
    return str;
  }
}
| 9,710 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/ShellCompactCommandSelector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
import org.apache.accumulo.core.client.summary.Summary;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.hadoop.fs.Path;
/**
* The compaction selector is used by the shell compact command. It exists in accumulo-core, so it
* is on the class path for the shell and servers that run compactions.
*/
public class ShellCompactCommandSelector implements CompactionSelector {

  // a single predicate over the tablet's available files
  private abstract static class Test {
    abstract Set<CompactableFile> getFilesToCompact(SelectionParameters params);
  }

  // selects files whose summary data is missing, or contains "extra" entries, for the table's
  // configured summarizers
  private static class SummaryTest extends Test {

    private boolean selectExtraSummary;
    private boolean selectNoSummary;

    public SummaryTest(boolean selectExtraSummary, boolean selectNoSummary) {
      this.selectExtraSummary = selectExtraSummary;
      this.selectNoSummary = selectNoSummary;
    }

    @Override
    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
      Collection<SummarizerConfiguration> configs = SummarizerConfiguration
          .fromTableProperties(params.getEnvironment().getConfiguration(params.getTableId()));
      if (configs.isEmpty()) {
        return Set.of();
      } else {
        Set<CompactableFile> filesToCompact = new HashSet<>();
        // reuse the collection as a set when possible to avoid a copy
        Set<SummarizerConfiguration> configsSet = configs instanceof Set
            ? (Set<SummarizerConfiguration>) configs : new HashSet<>(configs);
        for (CompactableFile tabletFile : params.getAvailableFiles()) {
          Map<SummarizerConfiguration,Summary> sMap = new HashMap<>();
          Collection<Summary> summaries;
          summaries =
              params.getSummaries(Collections.singletonList(tabletFile), configsSet::contains);
          for (Summary summary : summaries) {
            sMap.put(summary.getSummarizerConfiguration(), summary);
          }
          for (SummarizerConfiguration sc : configs) {
            Summary summary = sMap.get(sc);
            // this file has no summary data for a configured summarizer
            if (summary == null && selectNoSummary) {
              filesToCompact.add(tabletFile);
              break;
            }
            // this file has summary data with "extra" entries
            if (summary != null && summary.getFileStatistics().getExtra() > 0
                && selectExtraSummary) {
              filesToCompact.add(tabletFile);
              break;
            }
          }
        }
        return filesToCompact;
      }
    }
  }

  // selects files that lack sample data for the table's configured sampler
  private static class NoSampleTest extends Test {

    @Override
    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
      SamplerConfigurationImpl sc = SamplerConfigurationImpl.newSamplerConfig(
          new ConfigurationCopy(params.getEnvironment().getConfiguration(params.getTableId())));
      if (sc == null) {
        // no sampler configured, so no file can be missing sample data
        return Set.of();
      }

      Set<CompactableFile> filesToCompact = new HashSet<>();
      for (CompactableFile tabletFile : params.getAvailableFiles()) {
        Optional<SortedKeyValueIterator<Key,Value>> sample =
            params.getSample(tabletFile, sc.toSamplerConfiguration());
        if (sample.isEmpty()) {
          filesToCompact.add(tabletFile);
        }
      }

      return filesToCompact;
    }
  }

  // selects files by comparing their estimated size against a threshold; subclasses define the
  // direction of the comparison
  private abstract static class FileSizeTest extends Test {
    private final long esize;

    private FileSizeTest(String s) {
      this.esize = Long.parseLong(s);
    }

    @Override
    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
      return params.getAvailableFiles().stream()
          .filter(cf -> shouldCompact(cf.getEstimatedSize(), esize)).collect(Collectors.toSet());
    }

    public abstract boolean shouldCompact(long fsize, long esize);
  }

  // selects files whose path (or name, per subclass) matches a regular expression
  private abstract static class PatternPathTest extends Test {
    private Pattern pattern;

    private PatternPathTest(String p) {
      this.pattern = Pattern.compile(p);
    }

    @Override
    Set<CompactableFile> getFilesToCompact(SelectionParameters params) {
      return params.getAvailableFiles().stream()
          .filter(cf -> pattern.matcher(getInput(new Path(cf.getUri()))).matches())
          .collect(Collectors.toSet());
    }

    public abstract String getInput(Path path);
  }

  // the configured tests; a file must pass all of them to be selected
  private List<Test> tests = new ArrayList<>();
  // NOTE(review): andTest is never set to false anywhere in this class, so the addAll branch in
  // select() appears unreachable -- confirm whether OR-combining was ever wired up.
  private boolean andTest = true;
  // minimum number of matching files required for the selection to proceed
  private int minFiles = 1;

  /**
   * Builds the list of selection tests from the options produced by {@link CompactionSettings}.
   *
   * @throws IllegalArgumentException if an option is not a selector setting
   */
  @Override
  public void init(InitParameters iparams) {
    boolean selectNoSummary = false;
    boolean selectExtraSummary = false;

    Set<Entry<String,String>> es = iparams.getOptions().entrySet();
    for (Entry<String,String> entry : es) {

      switch (CompactionSettings.valueOf(entry.getKey())) {
        case SF_EXTRA_SUMMARY:
          selectExtraSummary = true;
          break;
        case SF_NO_SUMMARY:
          selectNoSummary = true;
          break;
        case SF_NO_SAMPLE:
          tests.add(new NoSampleTest());
          break;
        case SF_LT_ESIZE_OPT:
          tests.add(new FileSizeTest(entry.getValue()) {
            @Override
            public boolean shouldCompact(long fsize, long esize) {
              return fsize < esize;
            }
          });
          break;
        case SF_GT_ESIZE_OPT:
          tests.add(new FileSizeTest(entry.getValue()) {
            @Override
            public boolean shouldCompact(long fsize, long esize) {
              return fsize > esize;
            }
          });
          break;
        case SF_NAME_RE_OPT:
          tests.add(new PatternPathTest(entry.getValue()) {
            @Override
            public String getInput(Path path) {
              return path.getName();
            }
          });
          break;
        case SF_PATH_RE_OPT:
          tests.add(new PatternPathTest(entry.getValue()) {
            @Override
            public String getInput(Path path) {
              return path.toString();
            }
          });
          break;
        case MIN_FILES_OPT:
          minFiles = Integer.parseInt(entry.getValue());
          break;
        default:
          throw new IllegalArgumentException("Unknown option " + entry.getKey());
      }
    }

    // both summary flags are handled by a single combined test
    if (selectExtraSummary || selectNoSummary) {
      tests.add(new SummaryTest(selectExtraSummary, selectNoSummary));
    }
  }

  /**
   * Runs every configured test and intersects the results; when no tests are configured, all
   * available files are selected. Returns an empty selection when fewer than minFiles match.
   */
  @Override
  public Selection select(SelectionParameters sparams) {
    Set<CompactableFile> filesToCompact =
        tests.isEmpty() ? new HashSet<>(sparams.getAvailableFiles()) : null;

    for (Test test : tests) {
      var files = test.getFilesToCompact(sparams);
      if (filesToCompact == null) {
        // NOTE(review): the first test's result set is used directly and later mutated via
        // retainAll/addAll; this assumes the returned set is mutable, but some tests can return
        // the immutable Set.of() -- confirm a defensive copy is not needed here.
        filesToCompact = files;
      } else if (andTest) {
        filesToCompact.retainAll(files);
      } else {
        filesToCompact.addAll(files);
      }
    }

    if (filesToCompact.size() < minFiles) {
      return new Selection(Set.of());
    }

    return new Selection(filesToCompact);
  }
}
| 9,711 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import static com.google.common.base.Preconditions.checkArgument;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
/**
 * Option type for byte-size settings (e.g. "32M"); parses the memory specification and returns
 * the canonical number of bytes as a decimal string.
 */
class SizeType implements Type {
  @Override
  public String convert(String str) {
    long bytes = ConfigurationTypeHelper.getFixedMemoryAsBytes(str);
    if (bytes <= 0) {
      // same contract as Preconditions.checkArgument(bytes > 0)
      throw new IllegalArgumentException();
    }
    return String.valueOf(bytes);
  }
}
| 9,712 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/cli/Help.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
/**
 * Base class for command line tools: wires up JCommander parsing and provides a standard help
 * flag, along with exit helpers that subclasses may override (e.g. for testing).
 */
public class Help {

  @Parameter(names = {"-h", "-?", "--help", "-help"}, help = true)
  public boolean help = false;

  /**
   * Parses {@code args} against this object and any additional option holders, printing usage and
   * exiting on a parse error or when help was requested.
   */
  public void parseArgs(String programName, String[] args, Object... others) {
    JCommander jc = new JCommander();
    jc.setProgramName(programName);
    jc.addObject(this);
    for (Object optionHolder : others) {
      jc.addObject(optionHolder);
    }
    try {
      jc.parse(args);
    } catch (ParameterException ex) {
      jc.usage();
      exitWithError(ex.getMessage(), 1);
    }
    if (help) {
      jc.usage();
      exit(0);
    }
  }

  /** Terminates the JVM with the given status; overridable for testing. */
  public void exit(int status) {
    System.exit(status);
  }

  /** Prints the message to stderr and exits with the given status. */
  public void exitWithError(String message, int status) {
    System.err.println(message);
    exit(status);
  }
}
| 9,713 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Scanner;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.clientImpl.ClientInfoImpl;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import com.beust.jcommander.IStringConverter;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.converters.IParameterSplitter;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class ClientOpts extends Help {
/** Converts a comma-separated option value into an {@link Authorizations} instance. */
public static class AuthConverter implements IStringConverter<Authorizations> {
  @Override
  public Authorizations convert(String value) {
    return new Authorizations(value.split(","));
  }
}
/** Converts a visibility expression option value into a {@link ColumnVisibility}. */
public static class VisibilityConverter implements IStringConverter<ColumnVisibility> {
  @Override
  public ColumnVisibility convert(String value) {
    return new ColumnVisibility(value);
  }
}
public static class NullSplitter implements IParameterSplitter {
@Override
public List<String> split(String value) {
return Collections.singletonList(value);
}
}
public static class PasswordConverter implements IStringConverter<String> {
public static final String STDIN = "stdin";
private enum KeyType {
PASS("pass:"), ENV("env:") {
@Override
String process(String value) {
return System.getenv(value);
}
},
FILE("file:") {
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "app is run in same security context as user providing the filename")
@Override
String process(String value) {
try (Scanner scanner = new Scanner(new File(value), UTF_8)) {
return scanner.nextLine();
} catch (IOException e) {
throw new ParameterException(e);
}
}
},
STDIN(PasswordConverter.STDIN) {
@Override
public boolean matches(String value) {
return prefix.equals(value);
}
@Override
public String convert(String value) {
// Will check for this later
return prefix;
}
};
String prefix;
private KeyType(String prefix) {
this.prefix = prefix;
}
public boolean matches(String value) {
return value.startsWith(prefix);
}
public String convert(String value) {
return process(value.substring(prefix.length()));
}
String process(String value) {
return value;
}
}
@Override
public String convert(String value) {
for (KeyType keyType : KeyType.values()) {
if (keyType.matches(value)) {
return keyType.convert(value);
}
}
return value;
}
}
/**
* A catch all for older legacy options that have been dropped. Most of them were replaced with
* accumulo-client.properties in 2.0. Others have been dropped completely.
*/
private String legacyClientOpts = "-p -tc --tokenClass -i --instance --site-file --keytab "
+ "--debug -fake --mock --ssl --sasl";
@Parameter(names = {"-p", "-tc", "--tokenClass", "-i", "--instance", "--site-file", "--keytab"},
hidden = true)
private String legacyOpts = null;
@Parameter(names = {"--debug", "-fake", "--mock", "--ssl", "--sasl"}, hidden = true)
private boolean legacyOptsBoolean = false;
@Parameter(names = {"-u", "--user"}, description = "Connection user")
public String principal = null;
@Parameter(names = "--password", converter = PasswordConverter.class,
description = "connection password (can be specified as '<password>', 'pass:<password>',"
+ " 'file:<local file containing the password>' or 'env:<variable containing"
+ " the pass>')",
password = true)
private String securePassword = null;
public AuthenticationToken getToken() {
return ClientProperty.getAuthenticationToken(getClientProps());
}
@Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class,
description = "the authorizations to use when reading or writing")
public Authorizations auths = Authorizations.EMPTY;
@Parameter(names = {"-c", "--config-file"}, description = "Read the given client config file. "
+ "If omitted, the classpath will be searched for file named accumulo-client.properties")
private String clientConfigFile = null;
@Parameter(names = "-o", splitter = NullSplitter.class, description = "Overrides property in "
+ "accumulo-client.properties. Expected format: -o <key>=<value>")
private List<String> overrides = new ArrayList<>();
@Parameter(names = "--trace", description = "turn on distributed tracing")
public boolean trace = false;
public Map<String,String> getOverrides() {
return ConfigOpts.getOverrides(overrides);
}
@Override
public void parseArgs(String programName, String[] args, Object... others) {
super.parseArgs(programName, args, others);
if (legacyOpts != null || legacyOptsBoolean) {
// grab the bad options
StringBuilder badOptions = new StringBuilder();
for (String arg : args) {
if (legacyClientOpts.contains(arg)) {
badOptions.append(arg).append(" ");
}
}
throw new IllegalArgumentException("The Client options: " + badOptions
+ "have been dropped. Use accumulo-client.properties for any connection or token "
+ "options. See '-c, --config-file' option.");
}
}
private Properties cachedProps = null;
public String getClientConfigFile() {
if (clientConfigFile == null) {
URL clientPropsUrl =
ClientOpts.class.getClassLoader().getResource("accumulo-client.properties");
if (clientPropsUrl != null) {
clientConfigFile = clientPropsUrl.getFile();
}
}
return clientConfigFile;
}
public Properties getClientProps() {
if (cachedProps == null) {
cachedProps = new Properties();
if (getClientConfigFile() != null) {
cachedProps = ClientInfoImpl.toProperties(getClientConfigFile());
}
if (principal != null) {
cachedProps.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), principal);
}
if (securePassword != null) {
ClientProperty.setPassword(cachedProps, securePassword.toString());
}
getOverrides().forEach((k, v) -> cachedProps.put(k, v));
ClientProperty.validate(cachedProps);
}
return cachedProps;
}
}
| 9,714 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/cli/ConfigOpts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.conf.SiteConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.converters.IParameterSplitter;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class ConfigOpts extends Help {
  private static final Logger log = LoggerFactory.getLogger(ConfigOpts.class);
  @Parameter(names = {"-p", "-props", "--props"}, description = "Sets path to accumulo.properties."
      + "The classpath will be searched if this property is not set")
  private String propsPath;
  /** @return the accumulo.properties path given on the command line, or null if not set */
  public synchronized String getPropertiesPath() {
    return propsPath;
  }
  /** Prevents JCommander from splitting -o values on commas. */
  public static class NullSplitter implements IParameterSplitter {
    @Override
    public List<String> split(String value) {
      return Collections.singletonList(value);
    }
  }
  @Parameter(names = "-o", splitter = NullSplitter.class,
      description = "Overrides configuration set in accumulo.properties (but NOT system-wide config"
          + " set in Zookeeper). Expected format: -o <key>=<value>")
  private List<String> overrides = new ArrayList<>();
  // lazily-built site configuration; null until getSiteConfiguration() is first called
  private SiteConfiguration siteConfig = null;
  /**
   * Builds (once) and returns the site configuration from the -p properties file (or the
   * environment/classpath when no path was given), with -o overrides applied on top.
   */
  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "process runs in same security context as admin who provided path")
  public synchronized SiteConfiguration getSiteConfiguration() {
    if (siteConfig == null) {
      String propsPath = getPropertiesPath();
      siteConfig = (propsPath == null ? SiteConfiguration.fromEnv()
          : SiteConfiguration.fromFile(new File(propsPath))).withOverrides(getOverrides()).build();
    }
    return siteConfig;
  }
  /** @return the -o overrides parsed into a key/value map */
  public Map<String,String> getOverrides() {
    return getOverrides(overrides);
  }
  /**
   * Parses a list of "key=value" override arguments into a map. A key given without a value is
   * treated as "true" when it names a boolean property.
   *
   * @param args raw override arguments, each of the form key=value
   * @return map of parsed override keys to values
   * @throws IllegalArgumentException if a key or value is empty
   */
  public static Map<String,String> getOverrides(List<String> args) {
    Map<String,String> config = new HashMap<>();
    for (String prop : args) {
      String[] propArgs = prop.split("=", 2);
      String key = propArgs[0].trim();
      String value;
      if (propArgs.length == 2) {
        value = propArgs[1].trim();
      } else { // if property is boolean then its mere existence assumes true
        value = Property.isValidBooleanPropertyKey(key) ? "true" : "";
      }
      if (key.isEmpty() || value.isEmpty()) {
        throw new IllegalArgumentException("Invalid command line -o option: " + prop);
      }
      config.put(key, value);
    }
    return config;
  }
  @Override
  public void parseArgs(String programName, String[] args, Object... others) {
    super.parseArgs(programName, args, others);
    if (!getOverrides().isEmpty()) {
      log.info("The following configuration was set on the command line:");
      for (Map.Entry<String,String> entry : getOverrides().entrySet()) {
        String key = entry.getKey();
        // parameterized logging; sensitive values are masked rather than logged
        log.info("{} = {}", key, Property.isSensitive(key) ? "<hidden>" : entry.getValue());
      }
    }
  }
}
| 9,715 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/classloader/URLContextClassLoaderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.classloader;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.spi.common.ContextClassLoaderFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
/**
* The default implementation of ContextClassLoaderFactory. This classloader returns a
* URLClassLoader based on the given context value which is a CSV list of URLs. For example,
* file://path/one/jar1.jar,file://path/two/jar2.jar
*/
public class URLContextClassLoaderFactory implements ContextClassLoaderFactory {
  private static final AtomicBoolean isInstantiated = new AtomicBoolean(false);
  private static final Logger LOG = LoggerFactory.getLogger(URLContextClassLoaderFactory.class);
  private static final String className = URLContextClassLoaderFactory.class.getName();
  // Cache the class loaders for re-use
  // WeakReferences are used so that the class loaders can be cleaned up when no longer needed
  // Classes that are loaded contain a reference to the class loader used to load them
  // so the class loader will be garbage collected when no more classes are loaded that reference it
  private final Cache<String,URLClassLoader> classloaders =
      Caffeine.newBuilder().weakValues().build();
  public URLContextClassLoaderFactory() {
    // enforce the singleton property; a second instance would create a second, divergent cache
    if (!isInstantiated.compareAndSet(false, true)) {
      throw new IllegalStateException("Can only instantiate " + className + " once");
    }
  }
  /**
   * Returns (creating and caching on first use) a URLClassLoader for the given context.
   *
   * @param context a comma-separated list of URLs, e.g.
   *        file://path/one/jar1.jar,file://path/two/jar2.jar
   * @throws IllegalArgumentException if the context is null or contains a malformed URL
   */
  @Override
  public ClassLoader getClassLoader(String context) {
    if (context == null) {
      throw new IllegalArgumentException("Unknown context");
    }
    return classloaders.get(context, k -> {
      LOG.debug("Creating URLClassLoader for context, uris: {}", context);
      return new URLClassLoader(Arrays.stream(context.split(",")).map(url -> {
        try {
          return new URL(url);
        } catch (MalformedURLException e) {
          // name the offending entry instead of rethrowing a bare RuntimeException, so
          // callers can tell which part of the context string is bad
          throw new IllegalArgumentException("Invalid URL in context: " + url, e);
        }
      }).toArray(URL[]::new), ClassLoader.getSystemClassLoader());
    });
  }
}
| 9,716 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/classloader/ClassLoaderUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.classloader;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.spi.common.ContextClassLoaderFactory;
import org.apache.accumulo.core.util.ConfigurationImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ClassLoaderUtil {
  private static final Logger LOG = LoggerFactory.getLogger(ClassLoaderUtil.class);
  // singleton factory; written only under the class lock in initContextFactory /
  // resetContextFactoryForTests, but read without synchronization elsewhere.
  // NOTE(review): unsynchronized reads assume initContextFactory() runs before any
  // getClassLoader(context) call — confirm initialization ordering at the call sites
  private static ContextClassLoaderFactory FACTORY;
  private ClassLoaderUtil() {
    // cannot construct; static utilities only
  }
  /**
   * Initialize the ContextClassLoaderFactory
   */
  public static synchronized void initContextFactory(AccumuloConfiguration conf) {
    if (FACTORY == null) {
      LOG.debug("Creating {}", ContextClassLoaderFactory.class.getName());
      String factoryName = conf.get(Property.GENERAL_CONTEXT_CLASSLOADER_FACTORY);
      if (factoryName == null || factoryName.isEmpty()) {
        // load the default implementation
        LOG.info("Using default {}, which is subject to change in a future release",
            ContextClassLoaderFactory.class.getName());
        FACTORY = new URLContextClassLoaderFactory();
      } else {
        // load user's selected implementation and provide it with the service environment
        try {
          var factoryClass = Class.forName(factoryName).asSubclass(ContextClassLoaderFactory.class);
          LOG.info("Creating {}: {}", ContextClassLoaderFactory.class.getName(), factoryName);
          FACTORY = factoryClass.getDeclaredConstructor().newInstance();
          FACTORY.init(() -> new ConfigurationImpl(conf));
        } catch (ReflectiveOperationException e) {
          throw new IllegalStateException("Unable to load and initialize class: " + factoryName, e);
        }
      }
    } else {
      // idempotent: a second call leaves the existing factory in place
      LOG.debug("{} already initialized with {}.", ContextClassLoaderFactory.class.getName(),
          FACTORY.getClass().getName());
    }
  }
  // for testing
  static ContextClassLoaderFactory getContextFactory() {
    return FACTORY;
  }
  // for testing
  static synchronized void resetContextFactoryForTests() {
    FACTORY = null;
  }
  /** @return the system class loader (the no-context case) */
  public static ClassLoader getClassLoader() {
    return getClassLoader(null);
  }
  /**
   * @param context classloader context name; null or empty means "no context"
   * @return the factory's class loader for the context, or the system class loader when no
   *         context is given
   */
  public static ClassLoader getClassLoader(String context) {
    if (context != null && !context.isEmpty()) {
      return FACTORY.getClassLoader(context);
    } else {
      return ClassLoader.getSystemClassLoader();
    }
  }
  /**
   * Checks whether the factory can produce a class loader for the given context. A null or empty
   * context is trivially valid; a factory exception or null loader means invalid.
   */
  public static boolean isValidContext(String context) {
    if (context != null && !context.isEmpty()) {
      try {
        var loader = FACTORY.getClassLoader(context);
        if (loader == null) {
          LOG.debug("Context {} resulted in a null classloader from {}.", context,
              FACTORY.getClass().getName());
          return false;
        }
        return true;
      } catch (RuntimeException e) {
        LOG.debug("Context {} is not valid.", context, e);
        return false;
      }
    } else {
      return true;
    }
  }
  /**
   * Loads the named class via the context's class loader and casts it to the given extension type.
   */
  public static <U> Class<? extends U> loadClass(String context, String className,
      Class<U> extension) throws ClassNotFoundException {
    return getClassLoader(context).loadClass(className).asSubclass(extension);
  }
  /** Loads the named class with no classloader context. */
  public static <U> Class<? extends U> loadClass(String className, Class<U> extension)
      throws ClassNotFoundException {
    return loadClass(null, className, extension);
  }
  /**
   * Retrieve the classloader context from a table's configuration.
   */
  public static String tableContext(AccumuloConfiguration conf) {
    return conf.get(Property.TABLE_CLASSLOADER_CONTEXT);
  }
}
| 9,717 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/AgeOffStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import java.io.Serializable;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This store removes Repos, in the store it wraps, that are in a finished or new state for more
* than a configurable time period.
*
* No external time source is used. It starts tracking idle time when its created.
*/
public class AgeOffStore<T> implements TStore<T> {
  /** Supplies the current time; abstracted so callers (and tests) can control the clock. */
  public interface TimeSource {
    long currentTimeMillis();
  }
  private static final Logger log = LoggerFactory.getLogger(AgeOffStore.class);
  // the wrapped store that actually persists transactions; most methods delegate to it
  private final ZooStore<T> store;
  // txid -> time (per timeSource) when the transaction entered an age-off-eligible state
  // (NEW, FAILED, or SUCCESSFUL); guarded by synchronized(this)
  private Map<Long,Long> candidates;
  // how long (ms, per timeSource) a candidate must remain idle before being deleted
  private long ageOffTime;
  // smallest timestamp in candidates, or Long.MAX_VALUE when empty; lets ageOff() skip
  // scanning when nothing can possibly be old enough yet; guarded by synchronized(this)
  private long minTime;
  private TimeSource timeSource;
  /** Recomputes minTime by scanning all candidate timestamps. Caller holds the lock. */
  private synchronized void updateMinTime() {
    minTime = Long.MAX_VALUE;
    for (Long time : candidates.values()) {
      if (time < minTime) {
        minTime = time;
      }
    }
  }
  /** Starts tracking idle time for a transaction that just became eligible for age-off. */
  private synchronized void addCandidate(long txid) {
    long time = timeSource.currentTimeMillis();
    candidates.put(txid, time);
    if (time < minTime) {
      minTime = time;
    }
  }
  /** Stops tracking a transaction; recomputes minTime if the removed entry was the oldest. */
  private synchronized void removeCandidate(long txid) {
    Long time = candidates.remove(txid);
    if (time != null && time <= minTime) {
      updateMinTime();
    }
  }
  /**
   * Deletes candidate transactions that have been idle for at least ageOffTime. The old txids
   * are collected under the lock, but the store deletes happen outside it so slow store
   * operations do not block other callers.
   */
  public void ageOff() {
    HashSet<Long> oldTxs = new HashSet<>();
    synchronized (this) {
      long time = timeSource.currentTimeMillis();
      // fast path: if even the oldest candidate is too young, skip the scan entirely
      if (minTime < time && time - minTime >= ageOffTime) {
        for (Entry<Long,Long> entry : candidates.entrySet()) {
          if (time - entry.getValue() >= ageOffTime) {
            oldTxs.add(entry.getKey());
          }
        }
        candidates.keySet().removeAll(oldTxs);
        updateMinTime();
      }
    }
    for (Long txid : oldTxs) {
      try {
        // reserve before checking status so the tx cannot change state mid-delete;
        // status is re-checked because it may have changed since the tx was a candidate
        store.reserve(txid);
        try {
          switch (store.getStatus(txid)) {
            case NEW:
            case FAILED:
            case SUCCESSFUL:
              store.delete(txid);
              log.debug("Aged off FATE tx {}", FateTxId.formatTid(txid));
              break;
            default:
              break;
          }
        } finally {
          store.unreserve(txid, 0);
        }
      } catch (Exception e) {
        // best effort: a failed age-off is logged and retried on a later pass
        log.warn("Failed to age off FATE tx " + FateTxId.formatTid(txid), e);
      }
    }
  }
  /**
   * Wraps the given store and seeds the candidate set from transactions already persisted in an
   * age-off-eligible state. Idle time is tracked from construction; no external clock state is
   * recovered.
   */
  public AgeOffStore(ZooStore<T> store, long ageOffTime, TimeSource timeSource) {
    this.store = store;
    this.ageOffTime = ageOffTime;
    this.timeSource = timeSource;
    candidates = new HashMap<>();
    minTime = Long.MAX_VALUE;
    List<Long> txids = store.list();
    for (Long txid : txids) {
      store.reserve(txid);
      try {
        switch (store.getStatus(txid)) {
          case NEW:
          case FAILED:
          case SUCCESSFUL:
            addCandidate(txid);
            break;
          default:
            break;
        }
      } finally {
        store.unreserve(txid, 0);
      }
    }
  }
  /** Creates a transaction in the wrapped store; new transactions start as age-off candidates. */
  @Override
  public long create() {
    long txid = store.create();
    addCandidate(txid);
    return txid;
  }
  // the methods below are plain delegation to the wrapped store
  @Override
  public long reserve() {
    return store.reserve();
  }
  @Override
  public void reserve(long tid) {
    store.reserve(tid);
  }
  @Override
  public boolean tryReserve(long tid) {
    return store.tryReserve(tid);
  }
  @Override
  public void unreserve(long tid, long deferTime) {
    store.unreserve(tid, deferTime);
  }
  @Override
  public Repo<T> top(long tid) {
    return store.top(tid);
  }
  @Override
  public void push(long tid, Repo<T> repo) throws StackOverflowException {
    store.push(tid, repo);
  }
  @Override
  public void pop(long tid) {
    store.pop(tid);
  }
  @Override
  public org.apache.accumulo.core.fate.TStore.TStatus getStatus(long tid) {
    return store.getStatus(tid);
  }
  /**
   * Delegates, then keeps the candidate set in sync: transitions into an active state stop the
   * idle clock, transitions into a terminal state start it.
   */
  @Override
  public void setStatus(long tid, org.apache.accumulo.core.fate.TStore.TStatus status) {
    store.setStatus(tid, status);
    switch (status) {
      case SUBMITTED:
      case IN_PROGRESS:
      case FAILED_IN_PROGRESS:
        removeCandidate(tid);
        break;
      case FAILED:
      case SUCCESSFUL:
        addCandidate(tid);
        break;
      default:
        break;
    }
  }
  @Override
  public org.apache.accumulo.core.fate.TStore.TStatus waitForStatusChange(long tid,
      EnumSet<org.apache.accumulo.core.fate.TStore.TStatus> expected) {
    return store.waitForStatusChange(tid, expected);
  }
  @Override
  public void setTransactionInfo(long tid, Fate.TxInfo txInfo, Serializable val) {
    store.setTransactionInfo(tid, txInfo, val);
  }
  @Override
  public Serializable getTransactionInfo(long tid, Fate.TxInfo txInfo) {
    return store.getTransactionInfo(tid, txInfo);
  }
  /** Delegates the delete, then stops tracking the transaction as a candidate. */
  @Override
  public void delete(long tid) {
    store.delete(tid);
    removeCandidate(tid);
  }
  @Override
  public List<Long> list() {
    return store.list();
  }
  @Override
  public long timeCreated(long tid) {
    return store.timeCreated(tid);
  }
  @Override
  public List<ReadOnlyRepo<T>> getStack(long tid) {
    return store.getStack(tid);
  }
}
| 9,718 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyRepo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
/**
* Read only access to a repeatable persisted operation.
*
* By definition, these methods are safe to call without impacting the state of FATE. They should
* also be safe to call without impacting the state of system components.
*/
public interface ReadOnlyRepo<T> {
  /**
   * Checks whether this operation can run now.
   *
   * @param tid the transaction id
   * @param environment execution environment supplied by the FATE runner
   * @return presumably 0 when ready, otherwise a defer time in milliseconds (matches the
   *         deferTime passed to TStore.unreserve) — confirm against Repo implementations
   */
  long isReady(long tid, T environment) throws Exception;
  /** @return a human-readable name for this operation, for logging and status reporting */
  String getName();
}
| 9,719 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/TStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import java.io.Serializable;
/**
* Transaction Store: a place to save transactions
*
* A transaction consists of a number of operations. To use, first create a transaction id, and then
* seed the transaction with an initial operation. An executor service can then execute the
* transaction's operation, possibly pushing more operations onto the transaction as each step
* successfully completes. If a step fails, the stack can be unwound, undoing each operation.
*/
public interface TStore<T> extends ReadOnlyTStore<T> {
  /**
   * Create a new transaction id
   *
   * @return a transaction id
   */
  long create();
  /**
   * Get the current (most recently pushed) operation for the given transaction.
   *
   * @param tid the transaction id
   * @return the operation on the top of the transaction's stack
   */
  @Override
  Repo<T> top(long tid);
  /**
   * Update the given transaction with the next operation
   *
   * @param tid the transaction id
   * @param repo the operation
   */
  void push(long tid, Repo<T> repo) throws StackOverflowException;
  /**
   * Remove the last pushed operation from the given transaction.
   *
   * @param tid the transaction id
   */
  void pop(long tid);
  /**
   * Update the state of a given transaction
   *
   * @param tid transaction id
   * @param status execution status
   */
  void setStatus(long tid, TStatus status);
  /**
   * Set transaction-specific information.
   *
   * @param tid transaction id
   * @param txInfo name of attribute of a transaction to set.
   * @param val transaction data to store
   */
  void setTransactionInfo(long tid, Fate.TxInfo txInfo, Serializable val);
  /**
   * Remove the transaction from the store.
   *
   * @param tid the transaction id
   */
  void delete(long tid);
  /**
   * Attempt to reserve transaction
   *
   * @param tid transaction id
   * @return true if reserved by this call, false if already reserved
   */
  boolean tryReserve(long tid);
}
| 9,720 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/AdminUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Formatter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus;
import org.apache.accumulo.core.fate.zookeeper.FateLock;
import org.apache.accumulo.core.fate.zookeeper.FateLock.FateLockPath;
import org.apache.accumulo.core.fate.zookeeper.ZooReader;
import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.lock.ServiceLock.ServiceLockPath;
import org.apache.accumulo.core.util.FastFormat;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* A utility to administer FATE operations
*/
public class AdminUtil<T> {
private static final Logger log = LoggerFactory.getLogger(AdminUtil.class);
private final boolean exitOnError;
/**
* Constructor
*
* @param exitOnError <code>System.exit(1)</code> on error if true
*/
  public AdminUtil(boolean exitOnError) {
    // when true, command-line callers exit the JVM on error instead of propagating it
    this.exitOnError = exitOnError;
  }
/**
* FATE transaction status, including lock information.
*/
  public static class TransactionStatus {
    // transaction id (raw long; formatted as hex by getTxid())
    private final long txid;
    // current execution status of the transaction
    private final TStatus status;
    // name of the FATE operation being run
    private final String txName;
    // namespace/table ids whose locks this transaction holds
    private final List<String> hlocks;
    // namespace/table ids whose locks this transaction is waiting to acquire
    private final List<String> wlocks;
    // name of the operation on top of the transaction's repo stack
    private final String top;
    // creation time in epoch millis; <= 0 indicates the time could not be determined
    private final long timeCreated;
    /** Immutable snapshot of one FATE transaction's status; lock lists are wrapped unmodifiable. */
    private TransactionStatus(Long tid, TStatus status, String txName, List<String> hlocks,
        List<String> wlocks, String top, Long timeCreated) {
      this.txid = tid;
      this.status = status;
      this.txName = txName;
      this.hlocks = Collections.unmodifiableList(hlocks);
      this.wlocks = Collections.unmodifiableList(wlocks);
      this.top = top;
      this.timeCreated = timeCreated;
    }
    /**
     * @return This fate operations transaction id, formatted in the same way as FATE transactions
     *         are in the Accumulo logs.
     */
    public String getTxid() {
      return FastFormat.toHexString(txid);
    }
    public TStatus getStatus() {
      return status;
    }
    /**
     * @return The name of the transaction running.
     */
    public String getTxName() {
      return txName;
    }
    /**
     * @return list of namespace and table ids locked
     */
    public List<String> getHeldLocks() {
      return hlocks;
    }
    /**
     * @return list of namespace and table ids locked
     */
    public List<String> getWaitingLocks() {
      return wlocks;
    }
    /**
     * @return The operation on the top of the stack for this Fate operation.
     */
    public String getTop() {
      return top;
    }
    /**
     * @return The timestamp of when the operation was created in ISO format with UTC timezone,
     *         or "ERROR" when the creation time is unknown (timeCreated <= 0).
     */
    public String getTimeCreatedFormatted() {
      return timeCreated > 0 ? new Date(timeCreated).toInstant().atZone(ZoneOffset.UTC)
          .format(DateTimeFormatter.ISO_DATE_TIME) : "ERROR";
    }
    /**
     * @return The unformatted form of the timestamp.
     */
    public long getTimeCreated() {
      return timeCreated;
    }
  }
  public static class FateStatus {
    // status snapshots for the transactions that matched the caller's filters
    private final List<TransactionStatus> transactions;
    // hex txid -> locked namespace/table ids, for locks held by non-existent transactions
    private final Map<String,List<String>> danglingHeldLocks;
    // hex txid -> locked namespace/table ids, for locks awaited by non-existent transactions
    private final Map<String,List<String>> danglingWaitingLocks;
    /**
     * Convert FATE transactions IDs in keys of map to format that used in printing and logging FATE
     * transactions ids. This is done so that if the map is printed, the output can be used to
     * search Accumulo's logs.
     */
    private static Map<String,List<String>> convert(Map<Long,List<String>> danglocks) {
      if (danglocks.isEmpty()) {
        return Collections.emptyMap();
      }
      Map<String,List<String>> ret = new HashMap<>();
      for (Entry<Long,List<String>> entry : danglocks.entrySet()) {
        ret.put(FastFormat.toHexString(entry.getKey()),
            Collections.unmodifiableList(entry.getValue()));
      }
      return Collections.unmodifiableMap(ret);
    }
    /** Immutable summary of transactions plus dangling lock maps (keys converted to hex). */
    private FateStatus(List<TransactionStatus> transactions,
        Map<Long,List<String>> danglingHeldLocks, Map<Long,List<String>> danglingWaitingLocks) {
      this.transactions = Collections.unmodifiableList(transactions);
      this.danglingHeldLocks = convert(danglingHeldLocks);
      this.danglingWaitingLocks = convert(danglingWaitingLocks);
    }
    public List<TransactionStatus> getTransactions() {
      return transactions;
    }
    /**
     * Get locks that are held by non existent FATE transactions. These are table or namespace
     * locks.
     *
     * @return map where keys are transaction ids and values are a list of table IDs and/or
     *         namespace IDs. The transaction IDs are in the same format as transaction IDs in the
     *         Accumulo logs.
     */
    public Map<String,List<String>> getDanglingHeldLocks() {
      return danglingHeldLocks;
    }
    /**
     * Get locks that are waiting to be acquired by non existent FATE transactions. These are table
     * or namespace locks.
     *
     * @return map where keys are transaction ids and values are a list of table IDs and/or
     *         namespace IDs. The transaction IDs are in the same format as transaction IDs in the
     *         Accumulo logs.
     */
    public Map<String,List<String>> getDanglingWaitingLocks() {
      return danglingWaitingLocks;
    }
  }
/**
* Returns a list of the FATE transactions, optionally filtered by transaction id and status. This
* method does not process lock information, if lock information is desired, use
* {@link #getStatus(ReadOnlyTStore, ZooReader, ServiceLockPath, Set, EnumSet)}
*
* @param zs read-only zoostore
* @param filterTxid filter results to include for provided transaction ids.
* @param filterStatus filter results to include only provided status types
* @return list of FATE transactions that match filter criteria
*/
public List<TransactionStatus> getTransactionStatus(ReadOnlyTStore<T> zs, Set<Long> filterTxid,
EnumSet<TStatus> filterStatus) {
FateStatus status = getTransactionStatus(zs, filterTxid, filterStatus,
Collections.<Long,List<String>>emptyMap(), Collections.<Long,List<String>>emptyMap());
return status.getTransactions();
}
  /**
   * Get the FATE transaction status and lock information stored in zookeeper, optionally filtered
   * by transaction id and filter status.
   *
   * @param zs read-only zoostore
   * @param zk zookeeper reader.
   * @param lockPath the zookeeper path for locks
   * @param filterTxid filter results to include for provided transaction ids.
   * @param filterStatus filter results to include only provided status types
   * @return a summary container of the fate transactions.
   * @throws KeeperException if zookeeper exception occurs
   * @throws InterruptedException if process is interrupted.
   */
  public FateStatus getStatus(ReadOnlyTStore<T> zs, ZooReader zk,
      ServiceLock.ServiceLockPath lockPath, Set<Long> filterTxid, EnumSet<TStatus> filterStatus)
      throws KeeperException, InterruptedException {
    Map<Long,List<String>> heldLocks = new HashMap<>();
    Map<Long,List<String>> waitingLocks = new HashMap<>();
    // Populate the lock maps first; getTransactionStatus removes entries matching live
    // transactions, so whatever remains is reported by FateStatus as dangling locks.
    findLocks(zk, lockPath, heldLocks, waitingLocks);
    return getTransactionStatus(zs, filterTxid, filterStatus, heldLocks, waitingLocks);
  }
  /**
   * Walk through the lock nodes in zookeeper to find and populate held locks and waiting locks.
   *
   * @param zk zookeeper reader
   * @param lockPath the zookeeper path for locks
   * @param heldLocks map for returning transactions with held locks
   * @param waitingLocks map for returning transactions with waiting locks
   * @throws KeeperException if initial lock list cannot be read.
   * @throws InterruptedException if thread interrupt detected while processing.
   */
  private void findLocks(ZooReader zk, final ServiceLock.ServiceLockPath lockPath,
      final Map<Long,List<String>> heldLocks, final Map<Long,List<String>> waitingLocks)
      throws KeeperException, InterruptedException {
    // stop with exception if lock ids cannot be retrieved from zookeeper
    List<String> lockedIds = zk.getChildren(lockPath.toString());
    for (String id : lockedIds) {
      try {
        FateLockPath fLockPath = FateLock.path(lockPath + "/" + id);
        // children are sorted into queue order; the node at position 0 holds the lock
        List<String> lockNodes =
            FateLock.validateAndSort(fLockPath, zk.getChildren(fLockPath.toString()));
        int pos = 0;
        boolean sawWriteLock = false;
        for (String node : lockNodes) {
          try {
            // lock node data format (per parsing below): "<R|W>:<txid in hex>"
            byte[] data = zk.getData(lockPath + "/" + id + "/" + node);
            String[] lda = new String(data, UTF_8).split(":");
            if (lda[0].charAt(0) == 'W') {
              sawWriteLock = true;
            }
            Map<Long,List<String>> locks;
            if (pos == 0) {
              // head of the queue always holds its lock
              locks = heldLocks;
            } else if (lda[0].charAt(0) == 'R' && !sawWriteLock) {
              // read locks ahead of the first write lock are held concurrently with the head
              locks = heldLocks;
            } else {
              locks = waitingLocks;
            }
            locks.computeIfAbsent(Long.parseLong(lda[1], 16), k -> new ArrayList<>())
                .add(lda[0].charAt(0) + ":" + id);
          } catch (Exception e) {
            // a single unreadable/malformed node should not abort the scan
            log.error("{}", e.getMessage(), e);
          }
          pos++;
        }
      } catch (KeeperException ex) {
        /*
         * could be transient zk error. Log, but try to process rest of list rather than throwing
         * exception here
         */
        log.error("Failed to read locks for " + id + " continuing.", ex);
      } catch (InterruptedException ex) {
        // restore interrupt status and propagate; do not keep processing
        Thread.currentThread().interrupt();
        throw ex;
      }
    }
  }
  /**
   * Returns fate status, possibly filtered
   *
   * @param zs read-only access to a populated transaction store.
   * @param filterTxid Optional. List of transactions to filter results - if null, all transactions
   *        are returned
   * @param filterStatus Optional. List of status types to filter results - if null, all
   *        transactions are returned.
   * @param heldLocks populated list of locks held by transaction - or an empty map if none.
   * @param waitingLocks populated list of locks waited on by transaction - or an empty map if
   *        none.
   * @return current fate and lock status
   */
  private FateStatus getTransactionStatus(ReadOnlyTStore<T> zs, Set<Long> filterTxid,
      EnumSet<TStatus> filterStatus, Map<Long,List<String>> heldLocks,
      Map<Long,List<String>> waitingLocks) {
    List<Long> transactions = zs.list();
    List<TransactionStatus> statuses = new ArrayList<>(transactions.size());
    for (Long tid : transactions) {
      // reserve so the transaction cannot change while we read its state
      zs.reserve(tid);
      String txName = (String) zs.getTransactionInfo(tid, Fate.TxInfo.TX_NAME);
      // remove() is deliberate: entries left behind in the maps belong to no live
      // transaction and are reported by FateStatus as dangling locks
      List<String> hlocks = heldLocks.remove(tid);
      if (hlocks == null) {
        hlocks = Collections.emptyList();
      }
      List<String> wlocks = waitingLocks.remove(tid);
      if (wlocks == null) {
        wlocks = Collections.emptyList();
      }
      String top = null;
      ReadOnlyRepo<T> repo = zs.top(tid);
      if (repo != null) {
        top = repo.getName();
      }
      TStatus status = zs.getStatus(tid);
      long timeCreated = zs.timeCreated(tid);
      zs.unreserve(tid, 0);
      if (includeByStatus(status, filterStatus) && includeByTxid(tid, filterTxid)) {
        statuses.add(new TransactionStatus(tid, status, txName, hlocks, wlocks, top, timeCreated));
      }
    }
    return new FateStatus(statuses, heldLocks, waitingLocks);
  }
private boolean includeByStatus(TStatus status, EnumSet<TStatus> filterStatus) {
return (filterStatus == null) || filterStatus.contains(status);
}
private boolean includeByTxid(Long tid, Set<Long> filterTxid) {
return (filterTxid == null) || filterTxid.isEmpty() || filterTxid.contains(tid);
}
  /** Prints all FATE transactions (no txid or status filtering) to {@code System.out}. */
  public void printAll(ReadOnlyTStore<T> zs, ZooReader zk,
      ServiceLock.ServiceLockPath tableLocksPath) throws KeeperException, InterruptedException {
    print(zs, zk, tableLocksPath, new Formatter(System.out), null, null);
  }
  /**
   * Writes a formatted report of FATE transactions, followed by any locks that have no associated
   * FATE operation, to {@code fmt}.
   *
   * @param filterTxid if non-null and non-empty, only report these transaction ids
   * @param filterStatus if non-null, only report transactions with these statuses
   */
  public void print(ReadOnlyTStore<T> zs, ZooReader zk, ServiceLock.ServiceLockPath tableLocksPath,
      Formatter fmt, Set<Long> filterTxid, EnumSet<TStatus> filterStatus)
      throws KeeperException, InterruptedException {
    FateStatus fateStatus = getStatus(zs, zk, tableLocksPath, filterTxid, filterStatus);
    for (TransactionStatus txStatus : fateStatus.getTransactions()) {
      fmt.format(
          "%-15s txid: %s status: %-18s locked: %-15s locking: %-15s op: %-15s created: %s%n",
          txStatus.getTxName(), txStatus.getTxid(), txStatus.getStatus(), txStatus.getHeldLocks(),
          txStatus.getWaitingLocks(), txStatus.getTop(), txStatus.getTimeCreatedFormatted());
    }
    fmt.format(" %s transactions", fateStatus.getTransactions().size());
    if (!fateStatus.getDanglingHeldLocks().isEmpty()
        || !fateStatus.getDanglingWaitingLocks().isEmpty()) {
      fmt.format("%nThe following locks did not have an associated FATE operation%n");
      for (Entry<String,List<String>> entry : fateStatus.getDanglingHeldLocks().entrySet()) {
        fmt.format("txid: %s locked: %s%n", entry.getKey(), entry.getValue());
      }
      for (Entry<String,List<String>> entry : fateStatus.getDanglingWaitingLocks().entrySet()) {
        fmt.format("txid: %s locking: %s%n", entry.getKey(), entry.getValue());
      }
    }
  }
  /**
   * Deletes the given transaction from the store (any known status qualifies), provided the
   * manager lock is not currently held.
   *
   * @param txidStr transaction id as a hex string
   * @return true if the transaction was deleted, false otherwise
   */
  public boolean prepDelete(TStore<T> zs, ZooReaderWriter zk, ServiceLockPath path,
      String txidStr) {
    if (!checkGlobalLock(zk, path)) {
      return false;
    }
    long txid;
    try {
      txid = Long.parseLong(txidStr, 16);
    } catch (NumberFormatException nfe) {
      System.out.printf("Invalid transaction ID format: %s%n", txidStr);
      return false;
    }
    boolean state = false;
    zs.reserve(txid);
    TStatus ts = zs.getStatus(txid);
    switch (ts) {
      case UNKNOWN:
        // no such transaction in the store
        System.out.printf("Invalid transaction ID: %016x%n", txid);
        break;
      case SUBMITTED:
      case IN_PROGRESS:
      case NEW:
      case FAILED:
      case FAILED_IN_PROGRESS:
      case SUCCESSFUL:
        System.out.printf("Deleting transaction: %016x (%s)%n", txid, ts);
        zs.delete(txid);
        state = true;
        break;
    }
    zs.unreserve(txid, 0);
    return state;
  }
  /**
   * Moves the given transaction to FAILED_IN_PROGRESS so its executed operations will be rolled
   * back, provided the manager lock is not held and the transaction has not already succeeded.
   *
   * @param txidStr transaction id as a hex string
   * @return true if the transaction is now failing or had already failed, false otherwise
   */
  public boolean prepFail(TStore<T> zs, ZooReaderWriter zk, ServiceLockPath zLockManagerPath,
      String txidStr) {
    if (!checkGlobalLock(zk, zLockManagerPath)) {
      return false;
    }
    long txid;
    try {
      txid = Long.parseLong(txidStr, 16);
    } catch (NumberFormatException nfe) {
      System.out.printf("Invalid transaction ID format: %s%n", txidStr);
      return false;
    }
    boolean state = false;
    zs.reserve(txid);
    TStatus ts = zs.getStatus(txid);
    switch (ts) {
      case UNKNOWN:
        System.out.printf("Invalid transaction ID: %016x%n", txid);
        break;
      case SUBMITTED:
      case IN_PROGRESS:
      case NEW:
        // not yet finished, can be failed
        System.out.printf("Failing transaction: %016x (%s)%n", txid, ts);
        zs.setStatus(txid, TStatus.FAILED_IN_PROGRESS);
        state = true;
        break;
      case SUCCESSFUL:
        // completed transactions cannot be failed
        System.out.printf("Transaction already completed: %016x (%s)%n", txid, ts);
        break;
      case FAILED:
      case FAILED_IN_PROGRESS:
        // already failing/failed; treat as success
        System.out.printf("Transaction already failed: %016x (%s)%n", txid, ts);
        state = true;
        break;
    }
    zs.unreserve(txid, 0);
    return state;
  }
  /**
   * Deletes any lock nodes under {@code path} whose node data references the given transaction
   * id.
   *
   * @param txidStr transaction id as it appears in the lock node data (hex string)
   */
  public void deleteLocks(ZooReaderWriter zk, ServiceLock.ServiceLockPath path, String txidStr)
      throws KeeperException, InterruptedException {
    // delete any locks assoc w/ fate operation
    List<String> lockedIds = zk.getChildren(path.toString());
    for (String id : lockedIds) {
      List<String> lockNodes = zk.getChildren(path + "/" + id);
      for (String node : lockNodes) {
        String lockPath = path + "/" + id + "/" + node;
        byte[] data = zk.getData(path + "/" + id + "/" + node);
        // node data is expected to be "<type>:<txid>"; NOTE(review): lda[1] would throw
        // ArrayIndexOutOfBoundsException on data without a ':' — confirm all writers use
        // this format
        String[] lda = new String(data, UTF_8).split(":");
        if (lda[1].equals(txidStr)) {
          zk.recursiveDelete(lockPath, NodeMissingPolicy.SKIP);
        }
      }
    }
  }
@SuppressFBWarnings(value = "DM_EXIT",
justification = "TODO - should probably avoid System.exit here; "
+ "this code is used by the fate admin shell command")
public boolean checkGlobalLock(ZooReaderWriter zk, ServiceLockPath zLockManagerPath) {
try {
if (ServiceLock.getLockData(zk.getZooKeeper(), zLockManagerPath) != null) {
System.err.println("ERROR: Manager lock is held, not running");
if (this.exitOnError) {
System.exit(1);
} else {
return false;
}
}
} catch (KeeperException e) {
System.err.println("ERROR: Could not read manager lock, not running " + e.getMessage());
if (this.exitOnError) {
System.exit(1);
} else {
return false;
}
} catch (InterruptedException e) {
System.err.println("ERROR: Could not read manager lock, not running" + e.getMessage());
if (this.exitOnError) {
System.exit(1);
} else {
return false;
}
}
return true;
}
}
| 9,721 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/AcceptableException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
/**
* An exception for FATE operations to use to denote when an Exception is acceptable and should not
* trigger warning messages. This exception is intended to wrap an existing exception from a FATE op
* implementation so that the FATE runner can know that the exception doesn't need to warn.
* <p>
* Often times, problems that map well into the FATE execution model have states in which it is
* impossible to know ahead of time if an exception will be thrown. For example, with concurrent
* create table operations, one of the operations will fail because the table already exists, but
* this is not an error condition for the system. It is normal and expected.
*/
public interface AcceptableException {
  // Intentionally empty: this is a marker interface. FATE code checks
  // "e instanceof AcceptableException" to decide that a failure is expected
  // and should not be logged as a warning.
}
| 9,722 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/Fate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.FAILED;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.FAILED_IN_PROGRESS;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.IN_PROGRESS;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.NEW;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.SUBMITTED;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.SUCCESSFUL;
import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.UNKNOWN;
import static org.apache.accumulo.core.util.ShutdownUtil.isIOException;
import java.util.EnumSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus;
import org.apache.accumulo.core.logging.FateLogger;
import org.apache.accumulo.core.util.ShutdownUtil;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.thrift.TApplicationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Fault tolerant executor
*/
public class Fate<T> {
private static final Logger log = LoggerFactory.getLogger(Fate.class);
private final Logger runnerLog = LoggerFactory.getLogger(TransactionRunner.class);
private final TStore<T> store;
private final T environment;
private final ScheduledThreadPoolExecutor fatePoolWatcher;
private final ExecutorService executor;
private static final EnumSet<TStatus> FINISHED_STATES = EnumSet.of(FAILED, SUCCESSFUL, UNKNOWN);
private final AtomicBoolean keepRunning = new AtomicBoolean(true);
public enum TxInfo {
TX_NAME, AUTO_CLEAN, EXCEPTION, RETURN_VALUE
}
private class TransactionRunner implements Runnable {
@Override
public void run() {
while (keepRunning.get()) {
long deferTime = 0;
Long tid = null;
try {
tid = store.reserve();
TStatus status = store.getStatus(tid);
Repo<T> op = store.top(tid);
if (status == FAILED_IN_PROGRESS) {
processFailed(tid, op);
} else {
Repo<T> prevOp = null;
try {
deferTime = op.isReady(tid, environment);
// Here, deferTime is only used to determine success (zero) or failure (non-zero),
// proceeding on success and returning to the while loop on failure.
// The value of deferTime is only used as a wait time in ZooStore.unreserve
if (deferTime == 0) {
prevOp = op;
if (status == SUBMITTED) {
store.setStatus(tid, IN_PROGRESS);
}
op = op.call(tid, environment);
} else {
continue;
}
} catch (Exception e) {
blockIfHadoopShutdown(tid, e);
transitionToFailed(tid, e);
continue;
}
if (op == null) {
// transaction is finished
String ret = prevOp.getReturn();
if (ret != null) {
store.setTransactionInfo(tid, TxInfo.RETURN_VALUE, ret);
}
store.setStatus(tid, SUCCESSFUL);
doCleanUp(tid);
} else {
try {
store.push(tid, op);
} catch (StackOverflowException e) {
// the op that failed to push onto the stack was never executed, so no need to undo
// it
// just transition to failed and undo the ops that executed
transitionToFailed(tid, e);
continue;
}
}
}
} catch (Exception e) {
runnerLog.error("Uncaught exception in FATE runner thread.", e);
} finally {
if (tid != null) {
store.unreserve(tid, deferTime);
}
}
}
}
/**
* The Hadoop Filesystem registers a java shutdown hook that closes the file system. This can
* cause threads to get spurious IOException. If this happens, instead of failing a FATE
* transaction just wait for process to die. When the manager start elsewhere the FATE
* transaction can resume.
*/
private void blockIfHadoopShutdown(long tid, Exception e) {
if (ShutdownUtil.isShutdownInProgress()) {
String tidStr = FateTxId.formatTid(tid);
if (e instanceof AcceptableException) {
log.debug("Ignoring exception possibly caused by Hadoop Shutdown hook. {} ", tidStr, e);
} else if (isIOException(e)) {
log.info("Ignoring exception likely caused by Hadoop Shutdown hook. {} ", tidStr, e);
} else {
// sometimes code will catch an IOException caused by the hadoop shutdown hook and throw
// another exception without setting the cause.
log.warn("Ignoring exception possibly caused by Hadoop Shutdown hook. {} ", tidStr, e);
}
while (true) {
// Nothing is going to work well at this point, so why even try. Just wait for the end,
// preventing this FATE thread from processing further work and likely failing.
sleepUninterruptibly(1, MINUTES);
}
}
}
private void transitionToFailed(long tid, Exception e) {
String tidStr = FateTxId.formatTid(tid);
final String msg = "Failed to execute Repo " + tidStr;
// Certain FATE ops that throw exceptions don't need to be propagated up to the Monitor
// as a warning. They're a normal, handled failure condition.
if (e instanceof AcceptableException) {
var tableOpEx = (AcceptableThriftTableOperationException) e;
log.debug(msg + " for {}({}) {}", tableOpEx.getTableName(), tableOpEx.getTableId(),
tableOpEx.getDescription());
} else {
log.warn(msg, e);
}
store.setTransactionInfo(tid, TxInfo.EXCEPTION, e);
store.setStatus(tid, FAILED_IN_PROGRESS);
log.info("Updated status for Repo with {} to FAILED_IN_PROGRESS", tidStr);
}
private void processFailed(long tid, Repo<T> op) {
while (op != null) {
undo(tid, op);
store.pop(tid);
op = store.top(tid);
}
store.setStatus(tid, FAILED);
doCleanUp(tid);
}
private void doCleanUp(long tid) {
Boolean autoClean = (Boolean) store.getTransactionInfo(tid, TxInfo.AUTO_CLEAN);
if (autoClean != null && autoClean) {
store.delete(tid);
} else {
// no longer need persisted operations, so delete them to save space in case
// TX is never cleaned up...
while (store.top(tid) != null) {
store.pop(tid);
}
}
}
private void undo(long tid, Repo<T> op) {
try {
op.undo(tid, environment);
} catch (Exception e) {
log.warn("Failed to undo Repo, " + FateTxId.formatTid(tid), e);
}
}
}
/**
* Creates a Fault-tolerant executor.
*
* @param toLogStrFunc A function that converts Repo to Strings that are suitable for logging
*/
public Fate(T environment, TStore<T> store, Function<Repo<T>,String> toLogStrFunc,
AccumuloConfiguration conf) {
this.store = FateLogger.wrap(store, toLogStrFunc);
this.environment = environment;
final ThreadPoolExecutor pool = ThreadPools.getServerThreadPools().createExecutorService(conf,
Property.MANAGER_FATE_THREADPOOL_SIZE, true);
this.fatePoolWatcher =
ThreadPools.getServerThreadPools().createGeneralScheduledExecutorService(conf);
ThreadPools.watchCriticalScheduledTask(fatePoolWatcher.schedule(() -> {
// resize the pool if the property changed
ThreadPools.resizePool(pool, conf, Property.MANAGER_FATE_THREADPOOL_SIZE);
// If the pool grew, then ensure that there is a TransactionRunner for each thread
int needed = conf.getCount(Property.MANAGER_FATE_THREADPOOL_SIZE) - pool.getQueue().size();
if (needed > 0) {
for (int i = 0; i < needed; i++) {
try {
pool.execute(new TransactionRunner());
} catch (RejectedExecutionException e) {
// RejectedExecutionException could be shutting down
if (pool.isShutdown()) {
// The exception is expected in this case, no need to spam the logs.
log.trace("Error adding transaction runner to FaTE executor pool.", e);
} else {
// This is bad, FaTE may no longer work!
log.error("Error adding transaction runner to FaTE executor pool.", e);
}
break;
}
}
}
}, 3, SECONDS));
this.executor = pool;
}
// get a transaction id back to the requester before doing any work
public long startTransaction() {
return store.create();
}
// start work in the transaction.. it is safe to call this
// multiple times for a transaction... but it will only seed once
public void seedTransaction(String txName, long tid, Repo<T> repo, boolean autoCleanUp,
String goalMessage) {
store.reserve(tid);
try {
if (store.getStatus(tid) == NEW) {
if (store.top(tid) == null) {
try {
log.info("Seeding {} {}", FateTxId.formatTid(tid), goalMessage);
store.push(tid, repo);
} catch (StackOverflowException e) {
// this should not happen
throw new IllegalStateException(e);
}
}
if (autoCleanUp) {
store.setTransactionInfo(tid, TxInfo.AUTO_CLEAN, autoCleanUp);
}
store.setTransactionInfo(tid, TxInfo.TX_NAME, txName);
store.setStatus(tid, SUBMITTED);
}
} finally {
store.unreserve(tid, 0);
}
}
// check on the transaction
public TStatus waitForCompletion(long tid) {
return store.waitForStatusChange(tid, FINISHED_STATES);
}
/**
* Attempts to cancel a running Fate transaction
*
* @param tid transaction id
* @return true if transaction transitioned to a failed state or already in a completed state,
* false otherwise
*/
public boolean cancel(long tid) {
String tidStr = FateTxId.formatTid(tid);
for (int retries = 0; retries < 5; retries++) {
if (store.tryReserve(tid)) {
try {
TStatus status = store.getStatus(tid);
log.info("status is: {}", status);
if (status == NEW || status == SUBMITTED) {
store.setTransactionInfo(tid, TxInfo.EXCEPTION, new TApplicationException(
TApplicationException.INTERNAL_ERROR, "Fate transaction cancelled by user"));
store.setStatus(tid, FAILED_IN_PROGRESS);
log.info("Updated status for {} to FAILED_IN_PROGRESS because it was cancelled by user",
tidStr);
return true;
} else {
log.info("{} cancelled by user but already in progress or finished state", tidStr);
return false;
}
} finally {
store.unreserve(tid, 0);
}
} else {
// reserved, lets retry.
UtilWaitThread.sleep(500);
}
}
log.info("Unable to reserve transaction {} to cancel it", tid);
return false;
}
// resource cleanup
public void delete(long tid) {
store.reserve(tid);
try {
switch (store.getStatus(tid)) {
case NEW:
case SUBMITTED:
case FAILED:
case SUCCESSFUL:
store.delete(tid);
break;
case FAILED_IN_PROGRESS:
case IN_PROGRESS:
throw new IllegalStateException(
"Can not delete in progress transaction " + FateTxId.formatTid(tid));
case UNKNOWN:
// nothing to do, it does not exist
break;
}
} finally {
store.unreserve(tid, 0);
}
}
public String getReturn(long tid) {
store.reserve(tid);
try {
if (store.getStatus(tid) != SUCCESSFUL) {
throw new IllegalStateException("Tried to get exception when transaction "
+ FateTxId.formatTid(tid) + " not in successful state");
}
return (String) store.getTransactionInfo(tid, TxInfo.RETURN_VALUE);
} finally {
store.unreserve(tid, 0);
}
}
// get reportable failures
public Exception getException(long tid) {
store.reserve(tid);
try {
if (store.getStatus(tid) != FAILED) {
throw new IllegalStateException("Tried to get exception when transaction "
+ FateTxId.formatTid(tid) + " not in failed state");
}
return (Exception) store.getTransactionInfo(tid, TxInfo.EXCEPTION);
} finally {
store.unreserve(tid, 0);
}
}
/**
* Flags that FATE threadpool to clear out and end. Does not actively stop running FATE processes.
*/
public void shutdown() {
keepRunning.set(false);
fatePoolWatcher.shutdown();
if (executor != null) {
executor.shutdown();
}
}
}
| 9,723 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyTStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import java.io.Serializable;
import java.util.EnumSet;
import java.util.List;
/**
* Read only access to a Transaction Store.
*
* A transaction consists of a number of operations. Instances of this class may check on the queue
* of outstanding transactions but may neither modify them nor create new ones.
*/
public interface ReadOnlyTStore<T> {
/**
* Possible operational status codes. Serialized by name within stores.
*/
enum TStatus {
/** Unseeded transaction */
NEW,
/** Transaction that is executing */
IN_PROGRESS,
/** Transaction has failed, and is in the process of being rolled back */
FAILED_IN_PROGRESS,
/** Transaction has failed and has been fully rolled back */
FAILED,
/** Transaction has succeeded */
SUCCESSFUL,
/** Unrecognized or unknown transaction state */
UNKNOWN,
/** Transaction that is eligible to be executed */
SUBMITTED
}
/**
* Reserve a transaction that is IN_PROGRESS or FAILED_IN_PROGRESS.
*
* Reserving a transaction id ensures that nothing else in-process interacting via the same
* instance will be operating on that transaction id.
*
* @return a transaction id that is safe to interact with, chosen by the store.
*/
long reserve();
/**
* Reserve the specific tid.
*
* Reserving a transaction id ensures that nothing else in-process interacting via the same
* instance will be operating on that transaction id.
*
*/
void reserve(long tid);
/**
* Return the given transaction to the store.
*
* upon successful return the store now controls the referenced transaction id. caller should no
* longer interact with it.
*
* @param tid transaction id, previously reserved.
* @param deferTime time in millis to keep this transaction out of the pool used in the
* {@link #reserve() reserve} method. must be non-negative.
*/
void unreserve(long tid, long deferTime);
/**
* Get the current operation for the given transaction id.
*
* Caller must have already reserved tid.
*
* @param tid transaction id, previously reserved.
* @return a read-only view of the operation
*/
ReadOnlyRepo<T> top(long tid);
/**
* Get all operations on a transactions stack. Element 0 contains the most recent operation pushed
* or the top.
*/
List<ReadOnlyRepo<T>> getStack(long tid);
/**
* Get the state of a given transaction.
*
* Caller must have already reserved tid.
*
* @param tid transaction id, previously reserved.
* @return execution status
*/
TStatus getStatus(long tid);
/**
* Wait for the status of a transaction to change
*
* @param tid transaction id, need not have been reserved.
* @param expected a set of possible statuses we are interested in being notified about. may not
* be null.
* @return execution status.
*/
TStatus waitForStatusChange(long tid, EnumSet<TStatus> expected);
/**
* Retrieve transaction-specific information.
*
* Caller must have already reserved tid.
*
* @param tid transaction id, previously reserved.
* @param txInfo name of attribute of a transaction to retrieve.
*/
Serializable getTransactionInfo(long tid, Fate.TxInfo txInfo);
/**
* list all transaction ids in store.
*
* @return all outstanding transactions, including those reserved by others.
*/
List<Long> list();
/**
* Retrieve the creation time of a FaTE transaction.
*
* @param tid Transaction id, previously reserved.
* @return creation time of transaction.
*/
long timeCreated(long tid);
}
| 9,724 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/Repo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import java.io.Serializable;
/**
* Repeatable persisted operation
*/
public interface Repo<T> extends ReadOnlyRepo<T>, Serializable {
Repo<T> call(long tid, T environment) throws Exception;
void undo(long tid, T environment) throws Exception;
// this allows the last fate op to return something to the user
String getReturn();
}
| 9,725 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/ZooStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeExistsPolicy;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.accumulo.core.util.FastFormat;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
//TODO use zoocache? - ACCUMULO-1297
//TODO handle zookeeper being down gracefully - ACCUMULO-1297
/**
 * A {@link TStore} implementation that persists FATE transaction state in ZooKeeper. Each
 * transaction is a child node of {@code path} named {@code tx_<hex tid>} (see
 * {@link #getTXPath(long)}); the node's data holds the {@link TStatus} name, sequential child
 * nodes named {@code repo_<seq>} hold the serialized repo stack, and additional named children
 * hold auxiliary transaction info.
 *
 * <p>
 * Reservation bookkeeping ({@code reserved}, {@code defered}) lives only in memory and is guarded
 * by synchronizing on {@code this}.
 */
public class ZooStore<T> implements TStore<T> {

  private static final Logger log = LoggerFactory.getLogger(ZooStore.class);

  // parent ZooKeeper node under which one tx_<hex tid> child exists per transaction
  private String path;
  private ZooReaderWriter zk;

  // name of the last tx node considered by reserve(); scanning resumes after it so repeated
  // calls cycle through all transactions (see starvation comment inside reserve())
  private String lastReserved = "";
  // tids currently reserved by some thread; access synchronized on this
  private Set<Long> reserved;
  // tid -> epoch millis before which reserve() should skip the tid; access synchronized on this
  private Map<Long,Long> defered;
  // incremented by setStatus(); lets waiters detect a status change that happened while they
  // were scanning, so they do not block and miss it
  private long statusChangeEvents = 0;
  // number of threads blocked in reserve(long); lets unreserve(long) skip a needless notifyAll()
  private int reservationsWaiting = 0;

  /**
   * Serializes an object using standard Java serialization.
   */
  private byte[] serialize(Object o) {
    try {
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      ObjectOutputStream oos = new ObjectOutputStream(baos);
      oos.writeObject(o);
      oos.close();
      return baos.toByteArray();
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Inverse of {@link #serialize(Object)}.
   */
  @SuppressFBWarnings(value = "OBJECT_DESERIALIZATION",
      justification = "unsafe to store arbitrary serialized objects like this, but needed for now"
          + " for backwards compatibility")
  private Object deserialize(byte[] ser) {
    try {
      ByteArrayInputStream bais = new ByteArrayInputStream(ser);
      ObjectInputStream ois = new ObjectInputStream(bais);
      return ois.readObject();
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    } catch (ReflectiveOperationException e) {
      throw new IllegalStateException(e);
    }
  }

  // ZooKeeper path of a transaction's node: <path>/tx_<hex tid>
  private String getTXPath(long tid) {
    return FastFormat.toHexString(path + "/tx_", tid, "");
  }

  // reverses getTXPath's naming: parses the hex tid out of a "tx_<hex>" node name
  private long parseTid(String txdir) {
    return Long.parseLong(txdir.split("_")[1], 16);
  }

  public ZooStore(String path, ZooReaderWriter zk) throws KeeperException, InterruptedException {
    this.path = path;
    this.zk = zk;
    this.reserved = new HashSet<>();
    this.defered = new HashMap<>();

    // ensure the parent node exists; harmless if it already does
    zk.putPersistentData(path, new byte[0], NodeExistsPolicy.SKIP);
  }

  /**
   * For testing only
   */
  ZooStore() {}

  /**
   * Creates a new transaction node in the NEW state and returns its id, retrying with a fresh
   * random id on collision. The mask keeps the tid non-negative.
   */
  @Override
  public long create() {
    while (true) {
      try {
        // looking at the code for SecureRandom, it appears to be thread safe
        long tid = RANDOM.get().nextLong() & 0x7fffffffffffffffL;
        zk.putPersistentData(getTXPath(tid), TStatus.NEW.name().getBytes(UTF_8),
            NodeExistsPolicy.FAIL);
        return tid;
      } catch (NodeExistsException nee) {
        // exist, so just try another random #
      } catch (KeeperException | InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }
  }

  /**
   * Reserves and returns the id of some runnable transaction (status SUBMITTED, IN_PROGRESS, or
   * FAILED_IN_PROGRESS), blocking until one is available.
   */
  @Override
  public long reserve() {
    try {
      while (true) {

        // snapshot the event counter before scanning; compared again before blocking below
        long events;
        synchronized (this) {
          events = statusChangeEvents;
        }

        List<String> txdirs = new ArrayList<>(zk.getChildren(path));
        Collections.sort(txdirs);

        synchronized (this) {
          // if every tx sorts at or below the resume point, start a fresh cycle
          if (!txdirs.isEmpty() && txdirs.get(txdirs.size() - 1).compareTo(lastReserved) <= 0) {
            lastReserved = "";
          }
        }

        for (String txdir : txdirs) {
          long tid = parseTid(txdir);

          synchronized (this) {
            // this check makes reserve pick up where it left off, so that it cycles through all as
            // it is repeatedly called.... failing to do so can lead to
            // starvation where fate ops that sort higher and hold a lock are never reserved.
            if (txdir.compareTo(lastReserved) <= 0) {
              continue;
            }

            // deferred tids are skipped until their deadline passes
            if (defered.containsKey(tid)) {
              if (defered.get(tid) < System.currentTimeMillis()) {
                defered.remove(tid);
              } else {
                continue;
              }
            }
            if (reserved.contains(tid)) {
              continue;
            } else {
              reserved.add(tid);
              lastReserved = txdir;
            }
          }

          // have reserved id, status should not change
          try {
            TStatus status = TStatus.valueOf(new String(zk.getData(path + "/" + txdir), UTF_8));
            if (status == TStatus.SUBMITTED || status == TStatus.IN_PROGRESS
                || status == TStatus.FAILED_IN_PROGRESS) {
              return tid;
            } else {
              // not runnable; release and keep scanning
              unreserve(tid);
            }
          } catch (NoNodeException nne) {
            // node deleted after we got the list of children, its ok
            unreserve(tid);
          } catch (KeeperException | InterruptedException | RuntimeException e) {
            unreserve(tid);
            throw e;
          }
        }

        synchronized (this) {
          // only block if nothing changed status during the scan; otherwise rescan immediately
          // suppress lgtm alert - synchronized variable is not always true
          if (events == statusChangeEvents) { // lgtm [java/constant-comparison]
            if (defered.isEmpty()) {
              this.wait(5000);
            } else {
              // wake up in time for the earliest deferred deadline, capped at 5s
              Long minTime = Collections.min(defered.values());
              long waitTime = minTime - System.currentTimeMillis();
              if (waitTime > 0) {
                this.wait(Math.min(waitTime, 5000));
              }
            }
          }
        }
      }
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Reserves a specific transaction, blocking until no other thread holds it.
   */
  @Override
  public void reserve(long tid) {
    synchronized (this) {
      reservationsWaiting++;
      try {
        while (reserved.contains(tid)) {
          try {
            this.wait(1000);
          } catch (InterruptedException e) {
            throw new IllegalStateException(e);
          }
        }

        reserved.add(tid);
      } finally {
        reservationsWaiting--;
      }
    }
  }

  /**
   * Attempt to reserve transaction
   *
   * @param tid transaction id
   * @return true if reserved by this call, false if already reserved
   */
  @Override
  public boolean tryReserve(long tid) {
    synchronized (this) {
      if (!reserved.contains(tid)) {
        // cannot block here: tid is unreserved, so reserve(tid) returns immediately
        reserve(tid);
        return true;
      }
      return false;
    }
  }

  /**
   * Releases a reservation taken by this class internally (e.g. from reserve()).
   *
   * @throws IllegalStateException if the tid was not reserved
   */
  private void unreserve(long tid) {
    synchronized (this) {
      if (!reserved.remove(tid)) {
        throw new IllegalStateException(
            "Tried to unreserve id that was not reserved " + FateTxId.formatTid(tid));
      }

      // do not want this unreserve to unesc wake up threads in reserve()... this leads to infinite
      // loop when tx is stuck in NEW...
      // only do this when something external has called reserve(tid)...
      if (reservationsWaiting > 0) {
        this.notifyAll();
      }
    }
  }

  /**
   * Releases a reservation, optionally deferring the transaction so reserve() skips it for
   * {@code deferTime} milliseconds.
   *
   * @throws IllegalArgumentException if deferTime is negative
   * @throws IllegalStateException if the tid was not reserved
   */
  @Override
  public void unreserve(long tid, long deferTime) {

    if (deferTime < 0) {
      throw new IllegalArgumentException("deferTime < 0 : " + deferTime);
    }

    synchronized (this) {
      if (!reserved.remove(tid)) {
        throw new IllegalStateException(
            "Tried to unreserve id that was not reserved " + FateTxId.formatTid(tid));
      }

      if (deferTime > 0) {
        defered.put(tid, System.currentTimeMillis() + deferTime);
      }

      this.notifyAll();
    }
  }

  // guard used by operations that require the caller to hold the reservation
  private void verifyReserved(long tid) {
    synchronized (this) {
      if (!reserved.contains(tid)) {
        throw new IllegalStateException(
            "Tried to operate on unreserved transaction " + FateTxId.formatTid(tid));
      }
    }
  }

  private static final int RETRIES = 10;

  /**
   * Returns the top repo of the transaction's stack, or null if the stack is empty (or the node
   * keeps disappearing after RETRIES attempts).
   */
  @Override
  public Repo<T> top(long tid) {
    verifyReserved(tid);

    for (int i = 0; i < RETRIES; i++) {
      String txpath = getTXPath(tid);
      try {
        String top;
        try {
          top = findTop(txpath);
          if (top == null) {
            return null;
          }
        } catch (KeeperException.NoNodeException ex) {
          throw new IllegalStateException(ex);
        }

        byte[] ser = zk.getData(txpath + "/" + top);
        @SuppressWarnings("unchecked")
        var deserialized = (Repo<T>) deserialize(ser);
        return deserialized;
      } catch (KeeperException.NoNodeException ex) {
        // the repo node vanished between findTop and getData; back off briefly and retry
        log.debug("zookeeper error reading " + txpath + ": " + ex, ex);
        sleepUninterruptibly(100, MILLISECONDS);
        continue;
      } catch (KeeperException | InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }
    return null;
  }

  /**
   * Returns the name of the highest-sorting {@code repo_*} child (the top of the stack, since
   * repo nodes are created with increasing sequence numbers), or null if there are none.
   */
  private String findTop(String txpath) throws KeeperException, InterruptedException {
    List<String> ops = zk.getChildren(txpath);

    ops = new ArrayList<>(ops);

    String max = "";

    for (String child : ops) {
      if (child.startsWith("repo_") && child.compareTo(max) > 0) {
        max = child;
      }
    }

    if (max.equals("")) {
      return null;
    }

    return max;
  }

  /**
   * Pushes a repo onto the transaction's stack as a new sequential {@code repo_} node.
   *
   * @throws StackOverflowException if the stack's sequence number exceeds 100
   */
  @Override
  public void push(long tid, Repo<T> repo) throws StackOverflowException {
    verifyReserved(tid);

    String txpath = getTXPath(tid);
    try {
      String top = findTop(txpath);
      if (top != null && Long.parseLong(top.split("_")[1]) > 100) {
        throw new StackOverflowException("Repo stack size too large");
      }

      zk.putPersistentSequential(txpath + "/repo_", serialize(repo));
    } catch (StackOverflowException soe) {
      throw soe;
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Removes the top repo from the transaction's stack.
   *
   * @throws IllegalStateException if the stack is empty
   */
  @Override
  public void pop(long tid) {
    verifyReserved(tid);

    try {
      String txpath = getTXPath(tid);
      String top = findTop(txpath);
      if (top == null) {
        throw new IllegalStateException("Tried to pop when empty " + FateTxId.formatTid(tid));
      }
      zk.recursiveDelete(txpath + "/" + top, NodeMissingPolicy.SKIP);
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  // reads the status stored in the tx node's data; UNKNOWN if the node does not exist
  private TStatus _getStatus(long tid) {
    try {
      return TStatus.valueOf(new String(zk.getData(getTXPath(tid)), UTF_8));
    } catch (NoNodeException nne) {
      return TStatus.UNKNOWN;
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  @Override
  public TStatus getStatus(long tid) {
    verifyReserved(tid);
    return _getStatus(tid);
  }

  /**
   * Blocks until the transaction's status is one of the expected statuses, then returns it. Does
   * not require a reservation; relies on statusChangeEvents to avoid missing updates.
   */
  @Override
  public TStatus waitForStatusChange(long tid, EnumSet<TStatus> expected) {
    while (true) {
      long events;
      synchronized (this) {
        events = statusChangeEvents;
      }

      TStatus status = _getStatus(tid);
      if (expected.contains(status)) {
        return status;
      }

      synchronized (this) {
        // suppress lgtm alert - synchronized variable is not always true
        if (events == statusChangeEvents) { // lgtm [java/constant-comparison]
          try {
            this.wait(5000);
          } catch (InterruptedException e) {
            throw new IllegalStateException(e);
          }
        }
      }
    }
  }

  /**
   * Overwrites the transaction's status and wakes any waiters.
   */
  @Override
  public void setStatus(long tid, TStatus status) {
    verifyReserved(tid);

    try {
      zk.putPersistentData(getTXPath(tid), status.name().getBytes(UTF_8),
          NodeExistsPolicy.OVERWRITE);
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }

    synchronized (this) {
      statusChangeEvents++;
    }
  }

  /**
   * Deletes the transaction node and everything under it.
   */
  @Override
  public void delete(long tid) {
    verifyReserved(tid);

    try {
      zk.recursiveDelete(getTXPath(tid), NodeMissingPolicy.SKIP);
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Stores auxiliary data under the tx node. Strings are stored as {@code "S " + value}; anything
   * else is Java-serialized and prefixed with {@code 'O', ' '} (see getTransactionInfo).
   */
  @Override
  public void setTransactionInfo(long tid, Fate.TxInfo txInfo, Serializable so) {
    verifyReserved(tid);

    try {
      if (so instanceof String) {
        zk.putPersistentData(getTXPath(tid) + "/" + txInfo, ("S " + so).getBytes(UTF_8),
            NodeExistsPolicy.OVERWRITE);
      } else {
        byte[] sera = serialize(so);
        byte[] data = new byte[sera.length + 2];
        System.arraycopy(sera, 0, data, 2, sera.length);
        data[0] = 'O';
        data[1] = ' ';
        zk.putPersistentData(getTXPath(tid) + "/" + txInfo, data, NodeExistsPolicy.OVERWRITE);
      }
    } catch (KeeperException | InterruptedException e2) {
      throw new IllegalStateException(e2);
    }
  }

  /**
   * Reverses {@link #setTransactionInfo}; returns null if the info node does not exist.
   */
  @Override
  public Serializable getTransactionInfo(long tid, Fate.TxInfo txInfo) {
    verifyReserved(tid);

    try {
      byte[] data = zk.getData(getTXPath(tid) + "/" + txInfo);

      if (data[0] == 'O') {
        byte[] sera = new byte[data.length - 2];
        System.arraycopy(data, 2, sera, 0, sera.length);
        return (Serializable) deserialize(sera);
      } else if (data[0] == 'S') {
        return new String(data, 2, data.length - 2, UTF_8);
      } else {
        throw new IllegalStateException("Bad node data " + txInfo);
      }
    } catch (NoNodeException nne) {
      return null;
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Lists the ids of all transactions currently stored.
   */
  @Override
  public List<Long> list() {
    try {
      ArrayList<Long> l = new ArrayList<>();
      List<String> transactions = zk.getChildren(path);
      for (String txid : transactions) {
        l.add(parseTid(txid));
      }
      return l;
    } catch (KeeperException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Returns the tx node's creation time, or 0 on any failure (deliberately best-effort).
   */
  @Override
  public long timeCreated(long tid) {
    verifyReserved(tid);

    try {
      Stat stat = zk.getZooKeeper().exists(getTXPath(tid), false);
      return stat.getCtime();
    } catch (Exception e) {
      return 0;
    }
  }

  /**
   * Reads the full repo stack (top first, via reverse sort of the sequential repo nodes) without
   * requiring a reservation. Restarts the read if a repo node vanishes mid-scan.
   */
  @Override
  public List<ReadOnlyRepo<T>> getStack(long tid) {
    String txpath = getTXPath(tid);

    outer: while (true) {
      List<String> ops;
      try {
        ops = zk.getChildren(txpath);
      } catch (KeeperException.NoNodeException e) {
        return Collections.emptyList();
      } catch (KeeperException | InterruptedException e1) {
        throw new IllegalStateException(e1);
      }

      ops = new ArrayList<>(ops);
      ops.sort(Collections.reverseOrder());

      ArrayList<ReadOnlyRepo<T>> dops = new ArrayList<>();

      for (String child : ops) {
        if (child.startsWith("repo_")) {
          byte[] ser;
          try {
            ser = zk.getData(txpath + "/" + child);
            @SuppressWarnings("unchecked")
            var repo = (ReadOnlyRepo<T>) deserialize(ser);
            dops.add(repo);
          } catch (KeeperException.NoNodeException e) {
            // children changed so start over
            continue outer;
          } catch (KeeperException | InterruptedException e) {
            throw new IllegalStateException(e);
          }
        }
      }

      return dops;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
/**
 * Checked exception indicating that a persisted stack has grown beyond its permitted depth.
 */
public class StackOverflowException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param msg description of the overflow condition
   */
  public StackOverflowException(String msg) {
    super(msg);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import java.util.regex.Pattern;
import org.apache.accumulo.core.util.FastFormat;
import com.google.common.base.Preconditions;
public class FateTxId {
private static final String PREFIX = "FATE[";
private static final String SUFFIX = "]";
private final static Pattern PATTERN =
Pattern.compile(Pattern.quote(PREFIX) + "[0-9a-fA-F]+" + Pattern.quote(SUFFIX));
private static String getHex(String fmtTid) {
return fmtTid.substring(PREFIX.length(), fmtTid.length() - SUFFIX.length());
}
/**
* @return true if string was created by {@link #formatTid(long)} and false otherwise.
*/
public static boolean isFormatedTid(String fmtTid) {
return PATTERN.matcher(fmtTid).matches();
}
/**
* Reverses {@link #formatTid(long)}
*/
public static long fromString(String fmtTid) {
Preconditions.checkArgument(fmtTid.startsWith(PREFIX) && fmtTid.endsWith(SUFFIX));
return Long.parseLong(getHex(fmtTid), 16);
}
/**
* Formats transaction ids in a consistent way that is useful for logging and persisting.
*/
public static String formatTid(long tid) {
// do not change how this formats without considering implications for persistence
return FastFormat.toHexString(PREFIX, tid, SUFFIX);
}
public static long parseTidFromUserInput(String s) {
if (isFormatedTid(s)) {
return fromString(s);
} else {
return Long.parseLong(s, 16);
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.lock.ServiceLock.ServiceLockPath;
import org.apache.accumulo.core.lock.ServiceLockData;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* A cache for values stored in ZooKeeper. Values are kept up to date as they change.
*/
/**
 * A client-side cache for values stored in ZooKeeper. Data, stat, and child lists are cached and
 * kept up to date via watches; any change or connection loss invalidates the affected entries.
 * Reads are served from an immutable snapshot ({@code immutableCache}); writers rebuild the
 * snapshot under {@code cacheWriteLock}.
 */
public class ZooCache {
  private static final Logger log = LoggerFactory.getLogger(ZooCache.class);

  private final ZCacheWatcher watcher = new ZCacheWatcher();
  private final Watcher externalWatcher;

  // guards the three mutable HashMaps below and publication of immutableCache
  private final ReadWriteLock cacheLock = new ReentrantReadWriteLock(false);
  private final Lock cacheWriteLock = cacheLock.writeLock();
  private final Lock cacheReadLock = cacheLock.readLock();

  private final HashMap<String,byte[]> cache;
  private final HashMap<String,ZcStat> statCache;
  private final HashMap<String,List<String>> childrenCache;

  private final ZooReader zReader;
  private volatile boolean closed = false;

  /**
   * Minimal immutable subset of a ZooKeeper {@code Stat} that this cache tracks.
   */
  public static class ZcStat {
    private long ephemeralOwner;
    private long mzxid;

    public ZcStat() {}

    private ZcStat(Stat stat) {
      this.ephemeralOwner = stat.getEphemeralOwner();
      this.mzxid = stat.getMzxid();
    }

    public long getEphemeralOwner() {
      return ephemeralOwner;
    }

    private void set(ZcStat cachedStat) {
      this.ephemeralOwner = cachedStat.ephemeralOwner;
      this.mzxid = cachedStat.mzxid;
    }

    @VisibleForTesting
    public void setEphemeralOwner(long ephemeralOwner) {
      this.ephemeralOwner = ephemeralOwner;
    }

    public long getMzxid() {
      return mzxid;
    }
  }

  /**
   * Immutable snapshot of the three caches, published as a single volatile reference so readers
   * see a consistent view without locking.
   */
  private static class ImmutableCacheCopies {
    final Map<String,byte[]> cache;
    final Map<String,ZcStat> statCache;
    final Map<String,List<String>> childrenCache;
    final long updateCount;

    ImmutableCacheCopies(long updateCount) {
      this.updateCount = updateCount;
      cache = Collections.emptyMap();
      statCache = Collections.emptyMap();
      childrenCache = Collections.emptyMap();
    }

    ImmutableCacheCopies(long updateCount, Map<String,byte[]> cache, Map<String,ZcStat> statCache,
        Map<String,List<String>> childrenCache) {
      this.updateCount = updateCount;
      this.cache = Collections.unmodifiableMap(new HashMap<>(cache));
      this.statCache = Collections.unmodifiableMap(new HashMap<>(statCache));
      this.childrenCache = Collections.unmodifiableMap(new HashMap<>(childrenCache));
    }

    // only the children cache changed; reuse the previous data/stat snapshots
    ImmutableCacheCopies(long updateCount, ImmutableCacheCopies prev,
        Map<String,List<String>> childrenCache) {
      this.updateCount = updateCount;
      this.cache = prev.cache;
      this.statCache = prev.statCache;
      this.childrenCache = Collections.unmodifiableMap(new HashMap<>(childrenCache));
    }

    // only the data/stat caches changed; reuse the previous children snapshot
    ImmutableCacheCopies(long updateCount, Map<String,byte[]> cache, Map<String,ZcStat> statCache,
        ImmutableCacheCopies prev) {
      this.updateCount = updateCount;
      this.cache = Collections.unmodifiableMap(new HashMap<>(cache));
      this.statCache = Collections.unmodifiableMap(new HashMap<>(statCache));
      this.childrenCache = prev.childrenCache;
    }
  }

  private volatile ImmutableCacheCopies immutableCache = new ImmutableCacheCopies(0);
  private long updateCount = 0;

  /**
   * Returns a ZooKeeper session. Calls should be made within run of ZooRunnable after caches are
   * checked. This will be performed at each retry of the run method. Calls to this method should be
   * made, ideally, after cache checks since other threads may have succeeded when updating the
   * cache. Doing this will ensure that we don't pay the cost of retrieving a ZooKeeper session on
   * each retry until we've ensured the caches aren't populated for a given node.
   *
   * @return ZooKeeper session.
   */
  private ZooKeeper getZooKeeper() {
    return zReader.getZooKeeper();
  }

  /**
   * Watcher that invalidates cache entries on node events and clears the whole cache on
   * connection loss or expiry, then forwards the event to any external watcher.
   */
  private class ZCacheWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
      if (log.isTraceEnabled()) {
        log.trace("{}", event);
      }

      switch (event.getType()) {
        case NodeDataChanged:
        case NodeChildrenChanged:
        case NodeCreated:
        case NodeDeleted:
          remove(event.getPath());
          break;
        case None:
          switch (event.getState()) {
            case Closed:
              // Closed is a new event in ZK 3.5 generated client-side when the ZK client is closed
              // These are ignored, because they are generated by SingletonManager closing
              // ZooKeepers for ZooSession, and SingletonManager is already responsible for clearing
              // caches via the registered ZooCacheFactory singleton
              log.trace("ZooKeeper connection closed, ignoring; {}", event);
              break;
            case Disconnected:
              log.trace("ZooKeeper connection disconnected, clearing cache; {}", event);
              clear();
              break;
            case SyncConnected:
              log.trace("ZooKeeper connection established, ignoring; {}", event);
              break;
            case Expired:
              log.trace("ZooKeeper connection expired, clearing cache; {}", event);
              clear();
              break;
            default:
              log.warn("Unhandled {}", event);
              break;
          }
          break;
        default:
          log.warn("Unhandled {}", event);
          break;
      }

      if (externalWatcher != null) {
        externalWatcher.process(event);
      }
    }
  }

  /**
   * Creates a new cache. The given watcher is called whenever a watched node changes.
   *
   * @param reader ZooKeeper reader
   * @param watcher watcher object
   */
  public ZooCache(ZooReader reader, Watcher watcher) {
    this.zReader = reader;
    this.cache = new HashMap<>();
    this.statCache = new HashMap<>();
    this.childrenCache = new HashMap<>();
    this.externalWatcher = watcher;
  }

  private abstract static class ZooRunnable<T> {
    /**
     * Runs an operation against ZooKeeper. Retries are performed by the retry method when
     * KeeperExceptions occur.
     *
     * Changes were made in ACCUMULO-4388 so that the run method no longer accepts Zookeeper as an
     * argument, and instead relies on the ZooRunnable implementation to call
     * {@link #getZooKeeper()}. Performing the call to retrieving a ZooKeeper Session after caches
     * are checked has the benefit of limiting ZK connections and blocking as a result of obtaining
     * these sessions.
     *
     * @return T the result of the runnable
     */
    abstract T run() throws KeeperException, InterruptedException;

    /**
     * Retry will attempt to call the run method. Run should make a call to {@link #getZooKeeper()}
     * after checks to cached information are made. This change, per ACCUMULO-4388 ensures that we
     * don't create a ZooKeeper session when information is cached, and access to ZooKeeper is
     * unnecessary.
     *
     * @return result of the runnable access success ( i.e. no exceptions ).
     */
    public T retry() {

      int sleepTime = 100;

      while (true) {

        try {
          return run();
        } catch (KeeperException e) {
          final Code code = e.code();
          if (code == Code.NONODE) {
            log.error("Looked up non-existent node in cache " + e.getPath(), e);
          } else if (code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT
              || code == Code.SESSIONEXPIRED) {
            log.warn("Saw (possibly) transient exception communicating with ZooKeeper, will retry",
                e);
          } else {
            log.warn("Zookeeper error, will retry", e);
          }
        } catch (InterruptedException e) {
          log.info("Zookeeper error, will retry", e);
        } catch (ConcurrentModificationException e) {
          log.debug("Zookeeper was modified, will retry");
        }

        try {
          // do not hold lock while sleeping
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
          log.debug("Wait in retry() was interrupted.", e);
        }
        // FIX: removed a stray LockSupport.parkNanos(sleepTime) here; it treated a millisecond
        // value as nanoseconds (a ~no-op) and was redundant after Thread.sleep above.
        if (sleepTime < 10_000) {
          // exponential backoff with jitter, capped at 10s
          sleepTime = (int) (sleepTime + sleepTime * RANDOM.get().nextDouble());
        }
      }
    }
  }

  /**
   * Gets the children of the given node. A watch is established by this call.
   *
   * @param zPath path of node
   * @return children list, or null if node has no children or does not exist
   */
  public List<String> getChildren(final String zPath) {
    Preconditions.checkState(!closed);

    ZooRunnable<List<String>> zr = new ZooRunnable<>() {

      @Override
      public List<String> run() throws KeeperException, InterruptedException {

        // only read volatile once for consistency
        ImmutableCacheCopies lic = immutableCache;
        if (lic.childrenCache.containsKey(zPath)) {
          return lic.childrenCache.get(zPath);
        }

        cacheWriteLock.lock();
        try {
          if (childrenCache.containsKey(zPath)) {
            return childrenCache.get(zPath);
          }

          final ZooKeeper zooKeeper = getZooKeeper();

          List<String> children = zooKeeper.getChildren(zPath, watcher);
          if (children != null) {
            children = List.copyOf(children);
          }
          childrenCache.put(zPath, children);
          immutableCache = new ImmutableCacheCopies(++updateCount, immutableCache, childrenCache);
          return children;
        } catch (KeeperException ke) {
          // a non-existent node is cacheable as "no children" (null); rethrow anything else
          if (ke.code() != Code.NONODE) {
            throw ke;
          }
        } finally {
          cacheWriteLock.unlock();
        }

        return null;
      }

    };

    return zr.retry();
  }

  /**
   * Gets data at the given path. Status information is not returned. A watch is established by this
   * call.
   *
   * @param zPath path to get
   * @return path data, or null if non-existent
   */
  public byte[] get(final String zPath) {
    return get(zPath, null);
  }

  /**
   * Gets data at the given path, filling status information into the given <code>Stat</code>
   * object. A watch is established by this call.
   *
   * @param zPath path to get
   * @param status status object to populate
   * @return path data, or null if non-existent
   */
  public byte[] get(final String zPath, final ZcStat status) {
    Preconditions.checkState(!closed);

    ZooRunnable<byte[]> zr = new ZooRunnable<>() {

      @Override
      public byte[] run() throws KeeperException, InterruptedException {
        ZcStat zstat = null;

        // only read volatile once so following code works with a consistent snapshot
        ImmutableCacheCopies lic = immutableCache;
        byte[] val = lic.cache.get(zPath);
        if (val != null || lic.cache.containsKey(zPath)) {
          if (status != null) {
            zstat = lic.statCache.get(zPath);
            copyStats(status, zstat);
          }
          return val;
        }

        /*
         * The following call to exists() is important, since we are caching that a node does not
         * exist. Once the node comes into existence, it will be added to the cache. But this
         * notification of a node coming into existence will only be given if exists() was
         * previously called. If the call to exists() is bypassed and only getData() is called with
         * a special case that looks for Code.NONODE in the KeeperException, then non-existence can
         * not be cached.
         */
        cacheWriteLock.lock();
        try {
          final ZooKeeper zooKeeper = getZooKeeper();
          Stat stat = zooKeeper.exists(zPath, watcher);
          byte[] data = null;
          if (stat == null) {
            if (log.isTraceEnabled()) {
              log.trace("zookeeper did not contain {}", zPath);
            }
          } else {
            try {
              data = zooKeeper.getData(zPath, watcher, stat);
              zstat = new ZcStat(stat);
            } catch (KeeperException.BadVersionException | KeeperException.NoNodeException e1) {
              // node changed between exists() and getData(); force a retry
              throw new ConcurrentModificationException();
            }
            if (log.isTraceEnabled()) {
              log.trace("zookeeper contained {} {}", zPath,
                  (data == null ? null : new String(data, UTF_8)));
            }
          }
          put(zPath, data, zstat);
          copyStats(status, zstat);
          return data;
        } finally {
          cacheWriteLock.unlock();
        }
      }
    };

    return zr.retry();
  }

  /**
   * Helper method to copy stats from the cached stat into userStat
   *
   * @param userStat user Stat object
   * @param cachedStat cached statistic, that is or will be cached
   */
  protected void copyStats(ZcStat userStat, ZcStat cachedStat) {
    Preconditions.checkState(!closed);
    if (userStat != null && cachedStat != null) {
      userStat.set(cachedStat);
    }
  }

  // caches a data value (possibly null == non-existent) and its stat, publishing a new snapshot
  private void put(String zPath, byte[] data, ZcStat stat) {
    cacheWriteLock.lock();
    try {
      cache.put(zPath, data);
      statCache.put(zPath, stat);

      immutableCache = new ImmutableCacheCopies(++updateCount, cache, statCache, immutableCache);
    } finally {
      cacheWriteLock.unlock();
    }
  }

  // invalidates all cached information about a single node
  private void remove(String zPath) {
    cacheWriteLock.lock();
    try {
      cache.remove(zPath);
      childrenCache.remove(zPath);
      statCache.remove(zPath);

      immutableCache = new ImmutableCacheCopies(++updateCount, cache, statCache, childrenCache);
    } finally {
      cacheWriteLock.unlock();
    }
  }

  /**
   * Clears this cache.
   */
  public void clear() {
    Preconditions.checkState(!closed);
    cacheWriteLock.lock();
    try {
      cache.clear();
      childrenCache.clear();
      statCache.clear();

      immutableCache = new ImmutableCacheCopies(++updateCount);
    } finally {
      cacheWriteLock.unlock();
    }
  }

  public void close() {
    closed = true;
  }

  /**
   * Returns a monotonically increasing count of the number of time the cache was updated. If the
   * count is the same, then it means cache did not change.
   */
  public long getUpdateCount() {
    Preconditions.checkState(!closed);
    return immutableCache.updateCount;
  }

  /**
   * Checks if a data value (or lack of one) is cached.
   *
   * @param zPath path of node
   * @return true if data value is cached
   */
  @VisibleForTesting
  boolean dataCached(String zPath) {
    cacheReadLock.lock();
    try {
      return immutableCache.cache.containsKey(zPath) && cache.containsKey(zPath);
    } finally {
      cacheReadLock.unlock();
    }
  }

  /**
   * Checks if children of a node (or lack of them) are cached.
   *
   * @param zPath path of node
   * @return true if children are cached
   */
  @VisibleForTesting
  boolean childrenCached(String zPath) {
    cacheReadLock.lock();
    try {
      return immutableCache.childrenCache.containsKey(zPath) && childrenCache.containsKey(zPath);
    } finally {
      cacheReadLock.unlock();
    }
  }

  /**
   * Clears this cache of all information about nodes rooted at the given path.
   *
   * @param zPath path of top node
   */
  public void clear(String zPath) {
    Preconditions.checkState(!closed);
    cacheWriteLock.lock();
    try {
      cache.keySet().removeIf(path -> path.startsWith(zPath));
      childrenCache.keySet().removeIf(path -> path.startsWith(zPath));
      statCache.keySet().removeIf(path -> path.startsWith(zPath));

      immutableCache = new ImmutableCacheCopies(++updateCount, cache, statCache, childrenCache);
    } finally {
      cacheWriteLock.unlock();
    }
  }

  /**
   * Reads the data of the first (lowest) lock node under the given lock path, or empty if there
   * are no lock candidates.
   */
  public Optional<ServiceLockData> getLockData(ServiceLockPath path) {
    List<String> children = ServiceLock.validateAndSort(path, getChildren(path.toString()));
    if (children == null || children.isEmpty()) {
      return Optional.empty();
    }

    String lockNode = children.get(0);

    byte[] lockData = get(path + "/" + lockNode);
    // FIX: substitute the empty array BEFORE the trace log; previously a null lockData (node
    // deleted between getChildren and get) caused an NPE in new String(lockData, UTF_8) when
    // trace logging was enabled.
    if (lockData == null) {
      lockData = new byte[0];
    }
    if (log.isTraceEnabled()) {
      log.trace("Data from lockNode {} is {}", lockNode, new String(lockData, UTF_8));
    }
    return ServiceLockData.parse(lockData);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.math.BigInteger;
import java.security.NoSuchAlgorithmException;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
public class ZooUtil {

  public enum NodeExistsPolicy {
    SKIP, OVERWRITE, FAIL
  }

  public enum NodeMissingPolicy {
    SKIP, CREATE, FAIL
  }

  // used for zookeeper stat print formatting
  private static final DateTimeFormatter fmt =
      DateTimeFormatter.ofPattern("EEE MMM dd HH:mm:ss 'UTC' yyyy");

  /**
   * Identifies a held lock by its parent path, lock node name, and ZooKeeper ephemeral session id
   * (eid). Supports a compact serialized form relative to a root path:
   * {@code <relative path>/<node>$<eid in hex>}.
   */
  public static class LockID {
    public long eid;
    public String path;
    public String node;

    /**
     * Reconstructs a lock id from its serialized form.
     *
     * @param root root path the serialized form is relative to
     * @param serializedLID string previously produced by {@link #serialize(String)}
     * @throws IllegalArgumentException if the serialized form is malformed
     */
    public LockID(String root, String serializedLID) {
      String[] sa = serializedLID.split("\\$");
      int lastSlash = sa[0].lastIndexOf('/');

      if (sa.length != 2 || lastSlash < 0) {
        throw new IllegalArgumentException("Malformed serialized lock id " + serializedLID);
      }

      if (lastSlash == 0) {
        path = root;
      } else {
        path = root + "/" + sa[0].substring(0, lastSlash);
      }

      node = sa[0].substring(lastSlash + 1);
      eid = new BigInteger(sa[1], 16).longValue();
    }

    public LockID(String path, String node, long eid) {
      this.path = path;
      this.node = node;
      this.eid = eid;
    }

    /** Returns the compact string form of this lock id, relative to the given root path. */
    public String serialize(String root) {
      return path.substring(root.length()) + "/" + node + "$" + Long.toHexString(eid);
    }

    @Override
    public String toString() {
      return " path = " + path + " node = " + node + " eid = " + Long.toHexString(eid);
    }
  }

  // Need to use Collections.unmodifiableList() instead of List.of() or List.copyOf(), because
  // ImmutableCollections.contains() doesn't handle nulls properly (JDK-8265905) and ZooKeeper (as
  // of 3.8.1) calls acl.contains((Object) null) which throws a NPE when passed an immutable
  // collection
  public static final List<ACL> PRIVATE =
      Collections.unmodifiableList(new ArrayList<>(Ids.CREATOR_ALL_ACL));
  public static final List<ACL> PUBLIC;

  static {
    var publicTmp = new ArrayList<>(PRIVATE);
    publicTmp.add(new ACL(Perms.READ, Ids.ANYONE_ID_UNSAFE));
    PUBLIC = Collections.unmodifiableList(publicTmp);
  }

  /** Returns the ZooKeeper root path for the given instance. */
  public static String getRoot(final InstanceId instanceId) {
    return Constants.ZROOT + "/" + instanceId;
  }

  /**
   * This method will delete a node and all its children.
   *
   * @throws IllegalArgumentException if policy is {@link NodeMissingPolicy#CREATE}
   */
  public static void recursiveDelete(ZooKeeper zooKeeper, String zPath, NodeMissingPolicy policy)
      throws KeeperException, InterruptedException {
    if (policy == NodeMissingPolicy.CREATE) {
      throw new IllegalArgumentException(policy.name() + " is invalid for this operation");
    }
    try {
      // delete children
      for (String child : zooKeeper.getChildren(zPath, null)) {
        recursiveDelete(zooKeeper, zPath + "/" + child, NodeMissingPolicy.SKIP);
      }

      // delete self
      zooKeeper.delete(zPath, -1);
    } catch (KeeperException e) {
      // new child appeared; try again. Return after the successful retry, so the stale
      // NOTEMPTY exception from this attempt is not rethrown.
      if (e.code() == Code.NOTEMPTY) {
        recursiveDelete(zooKeeper, zPath, policy);
        return;
      }
      if (policy == NodeMissingPolicy.SKIP && e.code() == Code.NONODE) {
        return;
      }
      throw e;
    }
  }

  /**
   * For debug: print the ZooKeeper Stat with value labels for a more user friendly string. The
   * format matches the zookeeper cli stat command.
   *
   * @param stat Zookeeper Stat structure
   * @return a formatted string.
   */
  public static String printStat(final Stat stat) {
    if (stat == null) {
      return "null";
    }
    return "\ncZxid = " + String.format("0x%x", stat.getCzxid()) + "\nctime = "
        + getFmtTime(stat.getCtime()) + "\nmZxid = " + String.format("0x%x", stat.getMzxid())
        + "\nmtime = " + getFmtTime(stat.getMtime()) + "\npZxid = "
        + String.format("0x%x", stat.getPzxid()) + "\ncversion = " + stat.getCversion()
        + "\ndataVersion = " + stat.getVersion() + "\naclVersion = " + stat.getAversion()
        + "\nephemeralOwner = " + String.format("0x%x", stat.getEphemeralOwner())
        + "\ndataLength = " + stat.getDataLength() + "\nnumChildren = " + stat.getNumChildren();
  }

  // formats an epoch-millis timestamp as UTC, matching the zookeeper cli's stat output
  private static String getFmtTime(final long epoch) {
    OffsetDateTime timestamp =
        OffsetDateTime.ofInstant(Instant.ofEpochMilli(epoch), ZoneOffset.UTC);
    return fmt.format(timestamp);
  }

  /**
   * Get the ZooKeeper digest based on the instance secret that is used within ZooKeeper for
   * authentication. This method is primary intended to be used to validate ZooKeeper ACLs. Use
   * {@link #digestAuth(ZooKeeper, String)} to add authorizations to ZooKeeper.
   */
  public static Id getZkDigestAuthId(final String secret) {
    try {
      final String scheme = "digest";
      String auth = DigestAuthenticationProvider.generateDigest("accumulo:" + secret);
      return new Id(scheme, auth);
    } catch (NoSuchAlgorithmException ex) {
      throw new IllegalArgumentException("Could not generate ZooKeeper digest string", ex);
    }
  }

  /** Adds digest-scheme authentication for the instance secret to the given ZooKeeper client. */
  public static void digestAuth(ZooKeeper zoo, String secret) {
    auth(zoo, "digest", ("accumulo:" + secret).getBytes(UTF_8));
  }

  /** Adds the given authentication info to the ZooKeeper client. */
  public static void auth(ZooKeeper zoo, String scheme, byte[] auth) {
    zoo.addAuthInfo(scheme, auth);
  }
}
| 9,730 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooSession.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.singletons.SingletonManager;
import org.apache.accumulo.core.singletons.SingletonService;
import org.apache.accumulo.core.util.AddressUtil;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracks shared, cached ZooKeeper client sessions keyed by connect string, timeout, and
 * authentication info. Sessions are created lazily and reused until closed. The cache is managed
 * as an Accumulo singleton so it is torn down when all AccumuloClients are closed.
 */
public class ZooSession {

  /** Thrown when a session is requested after the singleton session cache has been disabled. */
  public static class ZooSessionShutdownException extends RuntimeException {

    public ZooSessionShutdownException(String msg) {
      super(msg);
    }

    private static final long serialVersionUID = 1L;
  }

  private static final Logger log = LoggerFactory.getLogger(ZooSession.class);

  /** Simple holder for a cached ZooKeeper client. */
  private static class ZooSessionInfo {
    public ZooSessionInfo(ZooKeeper zooKeeper) {
      this.zooKeeper = zooKeeper;
    }

    ZooKeeper zooKeeper;
  }

  // cache of open sessions; null when the singleton service is disabled
  private static Map<String,ZooSessionInfo> sessions = new HashMap<>();

  static {
    // register with the SingletonManager so the session cache is torn down when the last
    // AccumuloClient is closed, and recreated if a new client is constructed afterwards
    SingletonManager.register(new SingletonService() {

      @Override
      public boolean isEnabled() {
        return ZooSession.isEnabled();
      }

      @Override
      public void enable() {
        ZooSession.enable();
      }

      @Override
      public void disable() {
        ZooSession.disable();
      }
    });
  }

  // cache key combining every parameter that affects session identity
  private static String sessionKey(String keepers, int timeout, String scheme, byte[] auth) {
    return keepers + ":" + timeout + ":" + (scheme == null ? "" : scheme) + ":"
        + (auth == null ? "" : new String(auth, UTF_8));
  }

  private static class ZooWatcher implements Watcher {

    @Override
    public void process(WatchedEvent event) {
      if (event.getState() == KeeperState.Expired) {
        log.debug("Session expired; {}", event);
      }
    }

  }

  /**
   * Creates a new ZooKeeper client, retrying until connected or a budget of twice the session
   * timeout is exhausted.
   *
   * @param host comma separated list of zk servers
   * @param timeout in milliseconds
   * @param scheme authentication type, e.g. 'digest', may be null
   * @param auth authentication-scheme-specific token, may be null
   * @param watcher ZK notifications, may be null
   * @throws IllegalStateException if no connection can be made within 2x the timeout
   */
  static ZooKeeper connect(String host, int timeout, String scheme, byte[] auth, Watcher watcher) {
    final int TIME_BETWEEN_CONNECT_CHECKS_MS = 100;
    // never wait longer than 10s within a single connection attempt
    int connectTimeWait = Math.min(10_000, timeout);
    boolean tryAgain = true;
    long sleepTime = 100;
    ZooKeeper zooKeeper = null;

    long startTime = System.nanoTime();

    while (tryAgain) {
      try {
        zooKeeper = new ZooKeeper(host, timeout, watcher);
        // it may take some time to get connected to zookeeper if some of the servers are down
        for (int i = 0; i < connectTimeWait / TIME_BETWEEN_CONNECT_CHECKS_MS && tryAgain; i++) {
          if (zooKeeper.getState().equals(States.CONNECTED)) {
            if (auth != null) {
              ZooUtil.auth(zooKeeper, scheme, auth);
            }
            tryAgain = false;
          } else {
            UtilWaitThread.sleep(TIME_BETWEEN_CONNECT_CHECKS_MS);
          }
        }

      } catch (IOException e) {
        if (e instanceof UnknownHostException) {
          /*
           * Make sure we wait at least as long as the JVM TTL for negative DNS responses
           */
          int ttl = AddressUtil.getAddressCacheNegativeTtl((UnknownHostException) e);
          sleepTime = Math.max(sleepTime, (ttl + 1) * 1000L);
        }
        log.warn("Connection to zooKeeper failed, will try again in "
            + String.format("%.2f secs", sleepTime / 1000.0), e);
      } finally {
        // close any partially-connected client before retrying with a fresh one
        if (tryAgain && zooKeeper != null) {
          try {
            zooKeeper.close();
            zooKeeper = null;
          } catch (InterruptedException e) {
            log.warn("interrupted", e);
          }
        }
      }

      long stopTime = System.nanoTime();
      long duration = NANOSECONDS.toMillis(stopTime - startTime);

      // give up entirely once twice the session timeout has elapsed
      if (duration > 2L * timeout) {
        throw new IllegalStateException("Failed to connect to zookeeper (" + host
            + ") within 2x zookeeper timeout period " + timeout);
      }

      if (tryAgain) {
        // cap the sleep so elapsed time + sleep + the next attempt stays within 2x timeout
        if (2L * timeout < duration + sleepTime + connectTimeWait) {
          sleepTime = 2L * timeout - duration - connectTimeWait;
        }
        if (sleepTime < 0) {
          connectTimeWait -= sleepTime;
          sleepTime = 0;
        }
        UtilWaitThread.sleep(sleepTime);
        if (sleepTime < 10000) {
          // randomized backoff, growing until the 10s cap
          sleepTime = sleepTime + (long) (sleepTime * RANDOM.get().nextDouble());
        }
      }
    }

    return zooKeeper;
  }

  /** Returns a cached session authenticated with the given scheme and token. */
  public static ZooKeeper getAuthenticatedSession(String zooKeepers, int timeout, String scheme,
      byte[] auth) {
    return getSession(zooKeepers, timeout, scheme, auth);
  }

  /** Returns a cached session with no authentication. */
  public static ZooKeeper getAnonymousSession(String zooKeepers, int timeout) {
    return getSession(zooKeepers, timeout, null, null);
  }

  /**
   * Returns a cached ZooKeeper session for the given parameters, creating one if none exists or
   * the cached one has been closed.
   *
   * @throws ZooSessionShutdownException if the session cache has been disabled
   */
  private static synchronized ZooKeeper getSession(String zooKeepers, int timeout, String scheme,
      byte[] auth) {
    if (sessions == null) {
      throw new ZooSessionShutdownException(
          "The Accumulo singleton that tracks zookeeper sessions is disabled. This is likely "
              + "caused by all AccumuloClients being closed or garbage collected.");
    }

    String sessionKey = sessionKey(zooKeepers, timeout, scheme, auth);

    // a read-only session can use a session with authorizations, so cache a copy for it w/out auths
    String readOnlySessionKey = sessionKey(zooKeepers, timeout, null, null);
    ZooSessionInfo zsi = sessions.get(sessionKey);
    if (zsi != null && zsi.zooKeeper.getState() == States.CLOSED) {
      log.debug("Removing closed ZooKeeper session to {}", zooKeepers);
      // also evict the read-only alias if it points at this same closed session
      if (auth != null && sessions.get(readOnlySessionKey) == zsi) {
        sessions.remove(readOnlySessionKey);
      }
      zsi = null;
      sessions.remove(sessionKey);
    }

    if (zsi == null) {
      ZooWatcher watcher = new ZooWatcher();
      log.debug("Connecting to {} with timeout {} with auth", zooKeepers, timeout);
      zsi = new ZooSessionInfo(connect(zooKeepers, timeout, scheme, auth, watcher));
      sessions.put(sessionKey, zsi);
      // let read-only callers reuse the authenticated session if no anonymous one exists yet
      if (auth != null && !sessions.containsKey(readOnlySessionKey)) {
        sessions.put(readOnlySessionKey, zsi);
      }
    }
    return zsi.zooKeeper;
  }

  private static synchronized boolean isEnabled() {
    return sessions != null;
  }

  private static synchronized void enable() {
    if (sessions != null) {
      return;
    }

    sessions = new HashMap<>();
  }

  // closes all cached sessions and disables the cache until enable() is called again
  private static synchronized void disable() {
    if (sessions == null) {
      return;
    }

    for (ZooSessionInfo zsi : sessions.values()) {
      try {
        zsi.zooKeeper.close();
      } catch (Exception e) {
        log.debug("Error closing zookeeper during shutdown", e);
      }
    }

    sessions = null;
  }
}
| 9,731 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/DistributedReadWriteLock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A ReadWriteLock that can be implemented in ZooKeeper. Features the ability to store data with the
* lock, and recover the lock using that data to find the lock.
*/
public class DistributedReadWriteLock implements java.util.concurrent.locks.ReadWriteLock {

  static enum LockType {
    READ, WRITE,
  }

  // serializer for lock type and user data; wire format is "<TYPE>:<userData>"
  static class ParsedLock {

    public ParsedLock(LockType type, byte[] userData) {
      this.type = type;
      this.userData = Arrays.copyOf(userData, userData.length);
    }

    /**
     * Deserializes lock data of the form {@code <TYPE>:<userData>}.
     *
     * @throws IllegalArgumentException if the data is null, empty, or has no ':' separator
     */
    public ParsedLock(byte[] lockData) {
      if (lockData == null || lockData.length < 1) {
        throw new IllegalArgumentException();
      }

      // locate the ':' separating the lock type name from the user data
      int split = -1;
      for (int i = 0; i < lockData.length; i++) {
        if (lockData[i] == ':') {
          split = i;
          break;
        }
      }

      if (split == -1) {
        throw new IllegalArgumentException();
      }

      this.type = LockType.valueOf(new String(lockData, 0, split, UTF_8));
      this.userData = Arrays.copyOfRange(lockData, split + 1, lockData.length);
    }

    public LockType getType() {
      return type;
    }

    public byte[] getUserData() {
      return userData;
    }

    /** Serializes this lock as {@code <TYPE>:<userData>}. */
    public byte[] getLockData() {
      byte[] typeBytes = type.name().getBytes(UTF_8);
      byte[] result = new byte[userData.length + 1 + typeBytes.length];
      System.arraycopy(typeBytes, 0, result, 0, typeBytes.length);
      result[typeBytes.length] = ':';
      System.arraycopy(userData, 0, result, typeBytes.length + 1, userData.length);
      return result;
    }

    private LockType type;
    private byte[] userData;
  }

  // This kind of lock can be easily implemented by ZooKeeper
  // You make an entry at the bottom of the queue, readers run when there are no writers ahead of
  // them,
  // a writer only runs when they are at the top of the queue.
  public interface QueueLock {
    SortedMap<Long,byte[]> getEarlierEntries(long entry);

    void removeEntry(long entry);

    long addEntry(byte[] data);
  }

  private static final Logger log = LoggerFactory.getLogger(DistributedReadWriteLock.class);

  static class ReadLock implements Lock {

    QueueLock qlock;
    byte[] userData;
    long entry = -1; // our position in the queue; -1 means no entry has been added yet

    ReadLock(QueueLock qlock, byte[] userData) {
      this.qlock = qlock;
      this.userData = userData;
    }

    // for recovery
    ReadLock(QueueLock qlock, byte[] userData, long entry) {
      this.qlock = qlock;
      this.userData = userData;
      this.entry = entry;
    }

    protected LockType lockType() {
      return LockType.READ;
    }

    @Override
    public void lock() {
      // block indefinitely; per the Lock.lock() contract, interrupts do not abort acquisition
      while (true) {
        try {
          if (tryLock(1, DAYS)) {
            return;
          }
        } catch (InterruptedException ex) {
          // ignored
        }
      }
    }

    @Override
    public void lockInterruptibly() throws InterruptedException {
      // NOTE(review): returns without the lock if the thread is interrupted, rather than
      // throwing InterruptedException as the Lock interface specifies — confirm callers cope
      while (!Thread.currentThread().isInterrupted()) {
        if (tryLock(100, MILLISECONDS)) {
          return;
        }
      }
    }

    @Override
    public boolean tryLock() {
      // lazily add our entry to the queue on the first attempt
      if (entry == -1) {
        entry = qlock.addEntry(new ParsedLock(this.lockType(), this.userData).getLockData());
        log.info("Added lock entry {} userData {} lockType {}", entry,
            new String(this.userData, UTF_8), lockType());
      }

      // the read lock is acquired when no WRITE entry is queued ahead of our own entry
      SortedMap<Long,byte[]> entries = qlock.getEarlierEntries(entry);
      for (Entry<Long,byte[]> entry : entries.entrySet()) {
        ParsedLock parsed = new ParsedLock(entry.getValue());
        if (entry.getKey().equals(this.entry)) {
          return true;
        }
        if (parsed.type == LockType.WRITE) {
          return false;
        }
      }
      throw new IllegalStateException("Did not find our own lock in the queue: " + this.entry
          + " userData " + new String(this.userData, UTF_8) + " lockType " + lockType());
    }

    @Override
    public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
      // poll tryLock() until the deadline passes
      long now = System.currentTimeMillis();
      long returnTime = now + MILLISECONDS.convert(time, unit);
      while (returnTime > now) {
        if (tryLock()) {
          return true;
        }
        // TODO: do something better than poll - ACCUMULO-1310
        UtilWaitThread.sleep(100);
        now = System.currentTimeMillis();
      }
      return false;
    }

    @Override
    public void unlock() {
      // no entry means nothing was ever acquired; unlock is a no-op
      if (entry == -1) {
        return;
      }
      log.debug("Removing lock entry {} userData {} lockType {}", entry,
          new String(this.userData, UTF_8), lockType());
      qlock.removeEntry(entry);
      entry = -1;
    }

    @Override
    public Condition newCondition() {
      throw new UnsupportedOperationException();
    }
  }

  static class WriteLock extends ReadLock {

    WriteLock(QueueLock qlock, byte[] userData) {
      super(qlock, userData);
    }

    WriteLock(QueueLock qlock, byte[] userData, long entry) {
      super(qlock, userData, entry);
    }

    @Override
    protected LockType lockType() {
      return LockType.WRITE;
    }

    @Override
    public boolean tryLock() {
      // lazily add our entry to the queue on the first attempt
      if (entry == -1) {
        entry = qlock.addEntry(new ParsedLock(this.lockType(), this.userData).getLockData());
        log.info("Added lock entry {} userData {} lockType {}", entry,
            new String(this.userData, UTF_8), lockType());
      }

      // the write lock is acquired only when our entry is at the head of the queue
      SortedMap<Long,byte[]> entries = qlock.getEarlierEntries(entry);
      Iterator<Entry<Long,byte[]>> iterator = entries.entrySet().iterator();
      if (!iterator.hasNext()) {
        throw new IllegalStateException("Did not find our own lock in the queue: " + this.entry
            + " userData " + new String(this.userData, UTF_8) + " lockType " + lockType());
      }
      return iterator.next().getKey().equals(entry);
    }
  }

  private QueueLock qlock;
  private byte[] data;

  public DistributedReadWriteLock(QueueLock qlock, byte[] data) {
    this.qlock = qlock;
    this.data = Arrays.copyOf(data, data.length);
  }

  /**
   * Scans the queue for an existing entry whose user data matches, and reconstructs the
   * corresponding lock object around it. Returns null if no matching entry exists.
   */
  public static Lock recoverLock(QueueLock qlock, byte[] data) {
    SortedMap<Long,byte[]> entries = qlock.getEarlierEntries(Long.MAX_VALUE);
    for (Entry<Long,byte[]> entry : entries.entrySet()) {
      ParsedLock parsed = new ParsedLock(entry.getValue());
      if (Arrays.equals(data, parsed.getUserData())) {
        switch (parsed.getType()) {
          case READ:
            return new ReadLock(qlock, parsed.getUserData(), entry.getKey());
          case WRITE:
            return new WriteLock(qlock, parsed.getUserData(), entry.getKey());
        }
      }
    }
    return null;
  }

  @Override
  public Lock readLock() {
    return new ReadLock(qlock, data);
  }

  @Override
  public Lock writeLock() {
    return new WriteLock(qlock, data);
  }
}
| 9,732 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
import org.apache.accumulo.core.util.Retry;
import org.apache.accumulo.core.util.Retry.RetryFactory;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Provides read-only access to ZooKeeper, transparently retrying operations that fail with
 * transient errors (connection loss, operation timeout, session expiration).
 */
public class ZooReader {
  private static final Logger log = LoggerFactory.getLogger(ZooReader.class);

  // up to 10 retries starting at 250ms, growing by 1.5x, capped at 2 minutes between attempts
  protected static final RetryFactory RETRY_FACTORY =
      Retry.builder().maxRetries(10).retryAfter(250, MILLISECONDS).incrementBy(250, MILLISECONDS)
          .maxWait(2, MINUTES).backOffFactor(1.5).logInterval(3, MINUTES).createFactory();

  protected final String keepers;
  protected final int timeout;

  /**
   * @param keepers comma-separated list of ZooKeeper servers
   * @param timeout requested session timeout in milliseconds
   */
  public ZooReader(String keepers, int timeout) {
    this.keepers = requireNonNull(keepers);
    this.timeout = timeout;
  }

  /** Returns a read-write view of the same ZooKeeper servers, authenticated with the secret. */
  public ZooReaderWriter asWriter(String secret) {
    return new ZooReaderWriter(keepers, timeout, secret);
  }

  protected ZooKeeper getZooKeeper() {
    return ZooSession.getAnonymousSession(keepers, timeout);
  }

  protected RetryFactory getRetryFactory() {
    return RETRY_FACTORY;
  }

  /**
   * Returns the requested ZooKeeper client session timeout. The client may negotiate a different
   * value and the actual negotiated value may change after a re-connect.
   *
   * @return the timeout in milliseconds
   */
  public int getSessionTimeout() {
    return timeout;
  }

  /** Reads the data at the given path, retrying transient errors. */
  public byte[] getData(String zPath) throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getData(zPath, null, null));
  }

  /** Reads the data at the given path, populating {@code stat} with the node's status. */
  public byte[] getData(String zPath, Stat stat) throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getData(zPath, null, requireNonNull(stat)));
  }

  /** Reads the data at the given path, setting a watcher for subsequent changes. */
  public byte[] getData(String zPath, Watcher watcher)
      throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getData(zPath, requireNonNull(watcher), null));
  }

  /** Reads the data at the given path, setting a watcher and populating {@code stat}. */
  public byte[] getData(String zPath, Watcher watcher, Stat stat)
      throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getData(zPath, requireNonNull(watcher), requireNonNull(stat)));
  }

  /** Returns the status of the node at the given path, or null if it does not exist. */
  public Stat getStatus(String zPath) throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.exists(zPath, null));
  }

  /** Returns the status of the node, or null if absent, setting a watcher either way. */
  public Stat getStatus(String zPath, Watcher watcher)
      throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.exists(zPath, requireNonNull(watcher)));
  }

  /** Returns the names of the children of the node at the given path. */
  public List<String> getChildren(String zPath) throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getChildren(zPath, null));
  }

  /** Returns the names of the children of the node, setting a watcher. */
  public List<String> getChildren(String zPath, Watcher watcher)
      throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getChildren(zPath, requireNonNull(watcher)));
  }

  /** Returns true if a node exists at the given path. */
  public boolean exists(String zPath) throws KeeperException, InterruptedException {
    return getStatus(zPath) != null;
  }

  /** Returns true if a node exists at the given path, setting a watcher either way. */
  public boolean exists(String zPath, Watcher watcher)
      throws KeeperException, InterruptedException {
    return getStatus(zPath, watcher) != null;
  }

  /**
   * Blocks until this client's view of the given path has caught up with the ZooKeeper leader.
   *
   * @throws KeeperException if the sync completes with a non-OK result code
   */
  public void sync(final String path) throws KeeperException, InterruptedException {
    final AtomicInteger rc = new AtomicInteger();
    final CountDownLatch waiter = new CountDownLatch(1);
    getZooKeeper().sync(path, (code, arg1, arg2) -> {
      rc.set(code);
      waiter.countDown();
    }, null);
    waiter.await();
    Code code = Code.get(rc.get());
    if (code != Code.OK) {
      throw KeeperException.create(code);
    }
  }

  /** A ZooKeeper operation that can be retried. */
  protected interface ZKFunction<R> {
    R apply(ZooKeeper zk) throws KeeperException, InterruptedException;
  }

  /** Like {@link ZKFunction}, but may also throw a table operation exception. */
  protected interface ZKFunctionMutator<R> {
    R apply(ZooKeeper zk)
        throws KeeperException, InterruptedException, AcceptableThriftTableOperationException;
  }

  /**
   * This method executes the provided function, retrying several times for transient issues.
   */
  protected <R> R retryLoop(ZKFunction<R> f) throws KeeperException, InterruptedException {
    return retryLoop(f, e -> false);
  }

  /**
   * This method executes the provided function, retrying several times for transient issues, and
   * retrying indefinitely for special cases. Use {@link #retryLoop(ZKFunction)} if there is no such
   * special case.
   */
  protected <R> R retryLoop(ZKFunction<R> zkf, Predicate<KeeperException> alwaysRetryCondition)
      throws KeeperException, InterruptedException {
    try {
      // reuse the code from retryLoopMutator, but suppress the exception
      // because ZKFunction can't throw it
      return retryLoopMutator(zkf::apply, alwaysRetryCondition);
    } catch (AcceptableThriftTableOperationException e) {
      // preserve the "impossible" exception as the cause so, if this is ever hit, the
      // evidence is not lost
      throw new AssertionError("Not possible; " + ZKFunction.class.getName() + " can't throw "
          + AcceptableThriftTableOperationException.class.getName(), e);
    }
  }

  /**
   * This method is a special case of {@link #retryLoop(ZKFunction, Predicate)}, intended to handle
   * {@link ZooReaderWriter#mutateExisting(String, ZooReaderWriter.Mutator)}'s additional thrown
   * exception type. Other callers should use {@link #retryLoop(ZKFunction)} or
   * {@link #retryLoop(ZKFunction, Predicate)} instead.
   */
  protected <R> R retryLoopMutator(ZKFunctionMutator<R> zkf,
      Predicate<KeeperException> alwaysRetryCondition)
      throws KeeperException, InterruptedException, AcceptableThriftTableOperationException {
    requireNonNull(zkf);
    requireNonNull(alwaysRetryCondition);
    var retries = getRetryFactory().createRetry();
    while (true) {
      try {
        return zkf.apply(getZooKeeper());
      } catch (KeeperException e) {
        if (alwaysRetryCondition.test(e)) {
          // caller-designated conditions retry forever without consuming a limited retry
          retries.waitForNextAttempt(log,
              "attempting to communicate with zookeeper after exception that always requires retry: "
                  + e.getMessage());
          continue;
        } else if (useRetryForTransient(retries, e)) {
          continue;
        }
        throw e;
      }
    }
  }

  // should use an available retry if there are retries left and
  // the issue is one that is likely to be transient
  private static boolean useRetryForTransient(Retry retries, KeeperException e)
      throws KeeperException, InterruptedException {
    final Code c = e.code();
    if (c == Code.CONNECTIONLOSS || c == Code.OPERATIONTIMEOUT || c == Code.SESSIONEXPIRED) {
      log.warn("Saw (possibly) transient exception communicating with ZooKeeper", e);
      if (retries.canRetry()) {
        retries.useRetry();
        retries.waitForNextAttempt(log,
            "attempting to communicate with zookeeper after exception: " + e.getMessage());
        return true;
      }
      log.error("Retry attempts ({}) exceeded trying to communicate with ZooKeeper",
          retries.retriesCompleted());
    }
    // non-transient issue should always be thrown and handled by the caller
    return false;
  }
}
| 9,733 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReservation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeExistsPolicy;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Utilities for reserving a ZooKeeper path with an id, so only the holder of the matching
 * reservation id may release it. The stored node data has the form
 * {@code <reservationID>:<debugInfo>}.
 */
public class ZooReservation {

  // cached logger instead of a LoggerFactory lookup on every call
  private static final Logger log = LoggerFactory.getLogger(ZooReservation.class);

  /**
   * Attempts to reserve the given path under the given reservation id.
   *
   * @param reservationID unique id for this reservation; must not contain ':' since that
   *        character separates the id from the debug info in the stored data
   * @param debugInfo human-readable context stored alongside the id
   * @return true if the path is now (or was already) reserved under this id, false if it is
   *         reserved under a different id
   */
  public static boolean attempt(ZooReaderWriter zk, String path, String reservationID,
      String debugInfo) throws KeeperException, InterruptedException {
    if (reservationID.contains(":")) {
      throw new IllegalArgumentException();
    }

    while (true) {
      try {
        zk.putPersistentData(path, (reservationID + ":" + debugInfo).getBytes(UTF_8),
            NodeExistsPolicy.FAIL);
        return true;
      } catch (NodeExistsException nee) {
        byte[] zooData;
        try {
          zooData = zk.getData(path);
        } catch (NoNodeException nne) {
          // the node that existed a moment ago was deleted; retry the reservation
          continue;
        }

        // the reservation holds only if the existing node carries our own id
        String idInZoo = new String(zooData, UTF_8).split(":")[0];

        return idInZoo.equals(reservationID);
      }
    }
  }

  /**
   * Releases the reservation at the given path. A missing node is treated as already released.
   *
   * @throws IllegalStateException if the path is reserved under a different id
   */
  public static void release(ZooReaderWriter zk, String path, String reservationID)
      throws KeeperException, InterruptedException {
    byte[] zooData;

    try {
      zooData = zk.getData(path);
    } catch (NoNodeException e) {
      // if the node is already gone, our work here is done
      log.debug("Node does not exist {}", path);
      return;
    }

    String zooDataStr = new String(zooData, UTF_8);
    String idInZoo = zooDataStr.split(":")[0];

    if (!idInZoo.equals(reservationID)) {
      throw new IllegalStateException("Tried to release reservation " + path
          + " with data mismatch " + reservationID + " " + zooDataStr);
    }

    zk.recursiveDelete(path, NodeMissingPolicy.SKIP);
  }
}
| 9,734 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/FateLock.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.util.Objects.requireNonNull;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.fate.zookeeper.DistributedReadWriteLock.QueueLock;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeExistsPolicy;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NotEmptyException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A persistent lock mechanism in ZooKeeper used for locking tables during FaTE operations.
*/
public class FateLock implements QueueLock {
private static final Logger log = LoggerFactory.getLogger(FateLock.class);
private static final String PREFIX = "flock#";
private final ZooReaderWriter zoo;
private final FateLockPath path;
public static class FateLockPath {
private final String path;
private FateLockPath(String path) {
this.path = requireNonNull(path);
}
@Override
public String toString() {
return this.path;
}
}
  /** Wraps a raw ZooKeeper path string in a {@link FateLockPath}. */
  public static FateLockPath path(String path) {
    return new FateLockPath(path);
  }
  /**
   * @param zrw writer used for all ZooKeeper interactions; must not be null
   * @param path ZooKeeper path under which lock entries are created; must not be null
   */
  public FateLock(ZooReaderWriter zrw, FateLockPath path) {
    this.zoo = requireNonNull(zrw);
    this.path = requireNonNull(path);
  }
@Override
public long addEntry(byte[] data) {
String newPath;
try {
while (true) {
try {
newPath = zoo.putPersistentSequential(path + "/" + PREFIX, data);
String[] parts = newPath.split("/");
String last = parts[parts.length - 1];
return Long.parseLong(last.substring(PREFIX.length()));
} catch (NoNodeException nne) {
// the parent does not exist so try to create it
zoo.putPersistentData(path.toString(), new byte[] {}, NodeExistsPolicy.SKIP);
}
}
} catch (KeeperException | InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
@Override
public SortedMap<Long,byte[]> getEarlierEntries(long entry) {
SortedMap<Long,byte[]> result = new TreeMap<>();
try {
List<String> children = Collections.emptyList();
try {
children = zoo.getChildren(path.toString());
} catch (KeeperException.NoNodeException ex) {
// the path does not exist (it was deleted or not created yet), that is ok there are no
// earlier entries then
}
for (String name : children) {
// this try catch must be done inside the loop because some subset of the children may exist
try {
byte[] data = zoo.getData(path + "/" + name);
long order = Long.parseLong(name.substring(PREFIX.length()));
if (order <= entry) {
result.put(order, data);
}
} catch (KeeperException.NoNodeException ex) {
// ignored
}
}
} catch (KeeperException | InterruptedException ex) {
throw new IllegalStateException(ex);
}
return result;
}
@Override
public void removeEntry(long entry) {
try {
zoo.recursiveDelete(path + String.format("/%s%010d", PREFIX, entry), NodeMissingPolicy.SKIP);
try {
// try to delete the parent if it has no children
zoo.delete(path.toString());
} catch (NotEmptyException nee) {
// the path had other lock nodes, no big deal
}
} catch (KeeperException | InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
/**
* Validate and sort child nodes at this lock path by the lock prefix
*/
public static List<String> validateAndSort(FateLockPath path, List<String> children) {
log.trace("validating and sorting children at path {}", path);
List<String> validChildren = new ArrayList<>();
if (children == null || children.isEmpty()) {
return validChildren;
}
children.forEach(c -> {
log.trace("Validating {}", c);
if (c.startsWith(PREFIX)) {
int idx = c.indexOf('#');
String sequenceNum = c.substring(idx + 1);
if (sequenceNum.length() == 10) {
try {
log.trace("Testing number format of {}", sequenceNum);
Integer.parseInt(sequenceNum);
validChildren.add(c);
} catch (NumberFormatException e) {
log.warn("Fate lock found with invalid sequence number format: {} (not a number)", c);
}
} else {
log.warn("Fate lock found with invalid sequence number format: {} (not 10 characters)",
c);
}
} else {
log.warn("Fate lock found with invalid lock format: {} (does not start with {})", c,
PREFIX);
}
});
if (validChildren.size() > 1) {
validChildren.sort((o1, o2) -> {
// Lock should be of the form:
// lock-sequenceNumber
// Example:
// flock#0000000000
// Lock length - sequenceNumber length
// 16 - 10
int secondHashIdx = 6;
return Integer.valueOf(o1.substring(secondHashIdx))
.compareTo(Integer.valueOf(o2.substring(secondHashIdx)));
});
}
log.trace("Children nodes (size: {}): {}", validChildren.size(), validChildren);
return validChildren;
}
}
| 9,735 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooReaderWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import java.util.List;
import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeExistsPolicy;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
public class ZooReaderWriter extends ZooReader {

  /**
   * Computes a new node value from the current one. Returning null means "do not write". May throw
   * {@link AcceptableThriftTableOperationException} to abort a table operation.
   */
  public interface Mutator {
    byte[] mutate(byte[] currentValue) throws AcceptableThriftTableOperationException;
  }

  public ZooReaderWriter(AccumuloConfiguration conf) {
    this(conf.get(Property.INSTANCE_ZK_HOST),
        (int) conf.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT),
        conf.get(Property.INSTANCE_SECRET));
  }

  private final String secret;
  private final byte[] auth;

  ZooReaderWriter(String keepers, int timeoutInMillis, String secret) {
    super(keepers, timeoutInMillis);
    this.secret = requireNonNull(secret);
    this.auth = ("accumulo:" + secret).getBytes(UTF_8);
  }

  @Override
  public ZooReaderWriter asWriter(String secret) {
    // avoid creating a new writer when the secret is unchanged
    if (this.secret.equals(secret)) {
      return this;
    }
    return super.asWriter(secret);
  }

  @Override
  public ZooKeeper getZooKeeper() {
    return ZooSession.getAuthenticatedSession(keepers, timeout, "digest", auth);
  }

  /**
   * Retrieve the ACL list that was on the node
   */
  public List<ACL> getACL(String zPath) throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.getACL(zPath, null));
  }

  /**
   * Create a persistent node with the default ACL
   *
   * @return true if the data was set on a new node or overwritten, and false if an existing node
   *         was skipped
   */
  public boolean putPersistentData(String zPath, byte[] data, NodeExistsPolicy policy)
      throws KeeperException, InterruptedException {
    return putPersistentData(zPath, data, policy, ZooUtil.PUBLIC);
  }

  /**
   * Create a persistent node with the private ACL
   *
   * @return true if the data was set on a new node or overwritten, and false if an existing node
   *         was skipped
   */
  public boolean putPrivatePersistentData(String zPath, byte[] data, NodeExistsPolicy policy)
      throws KeeperException, InterruptedException {
    return putPersistentData(zPath, data, policy, ZooUtil.PRIVATE);
  }

  /**
   * Create a persistent node with the provided ACLs
   *
   * @return true if the data was set on a new node or overwritten, and false if an existing node
   *         was skipped
   */
  public boolean putPersistentData(String zPath, byte[] data, NodeExistsPolicy policy,
      List<ACL> acls) throws KeeperException, InterruptedException {
    // zk allows null ACLs, but it's probably a bug in Accumulo if we see it used in our code
    requireNonNull(acls);
    requireNonNull(policy);
    return retryLoop(zk -> {
      try {
        zk.create(zPath, data, acls, CreateMode.PERSISTENT);
        return true;
      } catch (KeeperException e) {
        if (e.code() == Code.NODEEXISTS) {
          switch (policy) {
            case SKIP:
              return false;
            case OVERWRITE:
              zk.setData(zPath, data, -1);
              return true;
            case FAIL:
            default:
              // re-throw below
          }
        }
        throw e;
      }
    },
        // if OVERWRITE policy is used, create() can fail with NODEEXISTS;
        // then, the node can be deleted, causing setData() to fail with NONODE;
        // if that happens, the following code ensures we retry
        e -> e.code() == Code.NONODE && policy == NodeExistsPolicy.OVERWRITE);
  }

  /**
   * Overwrite a persistent node if the data version matches.
   *
   * @param zPath the zookeeper path
   * @param data the byte array data
   * @param expectedVersion the expected data version of the zookeeper node.
   * @return true if the data was set, false if the version does not match expected.
   * @throws KeeperException if a KeeperException occurs (no node most likely)
   * @throws InterruptedException if the zookeeper write is interrupted.
   */
  public boolean overwritePersistentData(String zPath, byte[] data, final int expectedVersion)
      throws KeeperException, InterruptedException {
    return retryLoop(zk -> {
      try {
        zk.setData(zPath, data, expectedVersion);
        return true;
      } catch (KeeperException.BadVersionException ex) {
        return false;
      }
    });
  }

  /**
   * Create a persistent sequential node with the default ACL
   *
   * @return the actual path of the created node
   */
  public String putPersistentSequential(String zPath, byte[] data)
      throws KeeperException, InterruptedException {
    return retryLoop(
        zk -> zk.create(zPath, data, ZooUtil.PUBLIC, CreateMode.PERSISTENT_SEQUENTIAL));
  }

  /**
   * Create an ephemeral node with the default ACL
   */
  public void putEphemeralData(String zPath, byte[] data)
      throws KeeperException, InterruptedException {
    retryLoop(zk -> zk.create(zPath, data, ZooUtil.PUBLIC, CreateMode.EPHEMERAL));
  }

  /**
   * Create an ephemeral sequential node with the default ACL
   *
   * @return the actual path of the created node
   */
  public String putEphemeralSequential(String zPath, byte[] data)
      throws KeeperException, InterruptedException {
    return retryLoop(zk -> zk.create(zPath, data, ZooUtil.PUBLIC, CreateMode.EPHEMERAL_SEQUENTIAL));
  }

  /**
   * Recursively copy any persistent data from the source to the destination, using the default ACL
   * to create any missing nodes and skipping over any ephemeral data.
   */
  public void recursiveCopyPersistentOverwrite(String source, String destination)
      throws KeeperException, InterruptedException {
    var stat = new Stat();
    byte[] data = getData(source, stat);
    // only copy persistent data
    if (stat.getEphemeralOwner() != 0) {
      return;
    }
    putPersistentData(destination, data, NodeExistsPolicy.OVERWRITE);
    if (stat.getNumChildren() > 0) {
      for (String child : getChildren(source)) {
        recursiveCopyPersistentOverwrite(source + "/" + child, destination + "/" + child);
      }
    }
  }

  /**
   * Update an existing ZK node using the provided mutator function. If it's possible the node
   * doesn't exist yet, use {@link #mutateOrCreate(String, byte[], Mutator)} instead.
   *
   * @return the value set on the node
   */
  public byte[] mutateExisting(String zPath, Mutator mutator)
      throws KeeperException, InterruptedException, AcceptableThriftTableOperationException {
    requireNonNull(mutator);
    return retryLoopMutator(zk -> {
      var stat = new Stat();
      byte[] data = zk.getData(zPath, null, stat);
      // this mutator can throw AcceptableThriftTableOperationException
      data = mutator.mutate(data);
      if (data != null) {
        zk.setData(zPath, data, stat.getVersion());
      }
      return data;
    }, e -> e.code() == Code.BADVERSION); // always retry if bad version
  }

  /**
   * Create a new {@link CreateMode#PERSISTENT} ZK node with the default ACL if it does not exist.
   * If it does already exist, then update it with the provided mutator function. If it is known to
   * exist already, use {@link #mutateExisting(String, Mutator)} instead.
   *
   * @return the value set on the node
   */
  public byte[] mutateOrCreate(String zPath, byte[] createValue, Mutator mutator)
      throws KeeperException, InterruptedException, AcceptableThriftTableOperationException {
    requireNonNull(mutator);
    return putPersistentData(zPath, createValue, NodeExistsPolicy.SKIP) ? createValue
        : mutateExisting(zPath, mutator);
  }

  /**
   * Ensure the provided path exists, using persistent nodes, empty data, and the default ACL for
   * any missing path elements.
   */
  public void mkdirs(String path) throws KeeperException, InterruptedException {
    if (path.equals("")) {
      // terminal condition for recursion
      return;
    }
    if (!path.startsWith("/")) {
      // note: message previously read "<path>does not start with /" (missing space)
      throw new IllegalArgumentException(path + " does not start with /");
    }
    if (exists(path)) {
      return;
    }
    String parent = path.substring(0, path.lastIndexOf("/"));
    mkdirs(parent);
    putPersistentData(path, new byte[0], NodeExistsPolicy.SKIP);
  }

  /**
   * Delete the specified node, and ignore NONODE exceptions.
   */
  public void delete(String path) throws KeeperException, InterruptedException {
    try {
      deleteStrict(path, -1);
    } catch (KeeperException e) {
      if (e.code() != Code.NONODE) {
        throw e;
      }
    }
  }

  /**
   * Delete the specified node if the version matches the provided version. All underlying
   * exceptions are thrown back to the caller.
   *
   * @param path the path of the ZooKeeper node.
   * @param version the expected version of the ZooKeeper node.
   */
  public void deleteStrict(final String path, final int version)
      throws KeeperException, InterruptedException {
    retryLoop(zk -> {
      zk.delete(path, version);
      return null;
    });
  }

  /**
   * This method will delete a node and all its children.
   */
  public void recursiveDelete(String zPath, NodeMissingPolicy policy)
      throws KeeperException, InterruptedException {
    ZooUtil.recursiveDelete(getZooKeeper(), zPath, policy);
  }
}
| 9,736 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/ZooCacheFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.singletons.SingletonManager;
import org.apache.accumulo.core.singletons.SingletonService;
/**
* A factory for {@link ZooCache} instances.
* <p>
* Implementation note: We were using the instances map to track all the instances that have been
* created, so we could explicitly close them when the SingletonManager detected that the last
* legacy client (using Connector/ZooKeeperInstance) has gone away. This class may no longer be
* needed, since the legacy client code has been removed, so long as the ZooCache instances it is
* tracking are managed as resources within ClientContext or ServerContext, and explicitly closed
* when those are closed.
*/
public class ZooCacheFactory {
  // instances doubles as the lock guarding both fields; final so the lock object
  // can never be swapped out
  private static final Map<String,ZooCache> instances = new HashMap<>();
  private static boolean enabled = true;

  public ZooCacheFactory() {}

  private static boolean isEnabled() {
    synchronized (instances) {
      return enabled;
    }
  }

  private static void enable() {
    synchronized (instances) {
      enabled = true;
    }
  }

  private static void disable() {
    synchronized (instances) {
      // close every cached instance; collect failures so one throwing close()
      // does not leak the remaining caches
      RuntimeException failure = null;
      for (ZooCache instance : instances.values()) {
        try {
          instance.close();
        } catch (RuntimeException e) {
          if (failure == null) {
            failure = e;
          } else {
            failure.addSuppressed(e);
          }
        }
      }
      instances.clear();
      enabled = false;
      if (failure != null) {
        throw failure;
      }
    }
  }

  static {
    // important because of ZOOKEEPER-2368.. when zookeeper client is closed it does not generate an
    // event!
    SingletonManager.register(new SingletonService() {

      @Override
      public synchronized boolean isEnabled() {
        return ZooCacheFactory.isEnabled();
      }

      @Override
      public synchronized void enable() {
        ZooCacheFactory.enable();
      }

      @Override
      public synchronized void disable() {
        ZooCacheFactory.disable();
      }
    });
  }

  /**
   * Gets a {@link ZooCache}. The same object may be returned for multiple calls with the same
   * arguments.
   *
   * @param zooKeepers comma-separated list of ZooKeeper host[:port]s
   * @param sessionTimeout session timeout
   * @return cache object
   */
  public ZooCache getZooCache(String zooKeepers, int sessionTimeout) {
    String key = zooKeepers + ":" + sessionTimeout;
    synchronized (instances) {
      if (!isEnabled()) {
        throw new IllegalStateException("The Accumulo singleton for zookeeper caching is "
            + "disabled. This is likely caused by all AccumuloClients being closed");
      }
      return instances.computeIfAbsent(key, k -> getNewZooCache(zooKeepers, sessionTimeout));
    }
  }

  /**
   * Always return a new {@link ZooCache}.
   *
   * @param zooKeepers comma-separated list of ZooKeeper host[:port]s
   * @param sessionTimeout session timeout
   * @return a new instance
   */
  public ZooCache getNewZooCache(String zooKeepers, int sessionTimeout) {
    return new ZooCache(new ZooReader(zooKeepers, sessionTimeout), null);
  }

  /**
   * Resets the factory. All cached objects are flushed (dropped without being closed).
   */
  void reset() {
    synchronized (instances) {
      instances.clear();
    }
  }
}
| 9,737 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/schema/Section.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.schema;
import org.apache.accumulo.core.data.Range;
/**
 * An immutable pairing of a row prefix and the {@link Range} of rows it covers.
 */
public class Section {
  private final String rowPrefix;
  private final Range range;

  /**
   * Creates a section whose prefix is {@code startRow} and whose range spans the given rows.
   *
   * @param startRow first row of the section; also used as the row prefix
   * @param startInclusive whether {@code startRow} is included in the range
   * @param endRow last row of the section
   * @param endInclusive whether {@code endRow} is included in the range
   */
  public Section(String startRow, boolean startInclusive, String endRow, boolean endInclusive) {
    this.rowPrefix = startRow;
    this.range = new Range(startRow, startInclusive, endRow, endInclusive);
  }

  /** Returns the row prefix (the start row) of this section. */
  public String getRowPrefix() {
    return rowPrefix;
  }

  /** Returns the range of rows covered by this section. */
  public Range getRange() {
    return range;
  }
}
| 9,738 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/state | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/state/tables/TableState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.state.tables;
/**
 * The lifecycle states a table can be in. Values other than {@link #UNKNOWN} are persisted;
 * UNKNOWN exists only to represent unrecognized serialized state.
 */
public enum TableState {
  /** Table is being created: directories and tablets are being made. */
  NEW,
  /** Table is online: tablets will be assigned to tablet servers. */
  ONLINE,
  /** Table is offline: tablets will be taken offline. */
  OFFLINE,
  /** Table is being deleted: waiting for tablets to go offline, then the table is removed. */
  DELETING,
  /**
   * NOT a valid state; reserved for unrecognized serialized representations of table state.
   */
  UNKNOWN
}
| 9,739 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/balancer/TableStatisticsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.balancer;
import org.apache.accumulo.core.manager.thrift.TableInfo;
import org.apache.accumulo.core.spi.balancer.data.TableStatistics;
/**
 * {@link TableStatistics} implementation that delegates to a thrift {@link TableInfo}.
 */
public class TableStatisticsImpl implements TableStatistics {
  private final TableInfo thriftInfo;

  /**
   * Wraps the given thrift table info, or returns null when {@code tableInfo} is null.
   */
  public static TableStatisticsImpl fromThrift(TableInfo tableInfo) {
    if (tableInfo == null) {
      return null;
    }
    return new TableStatisticsImpl(tableInfo);
  }

  public TableStatisticsImpl(TableInfo thriftInfo) {
    this.thriftInfo = thriftInfo;
  }

  /** Copy constructor; makes a deep copy of the underlying thrift object. */
  public TableStatisticsImpl(TableStatisticsImpl other) {
    this.thriftInfo = new TableInfo(other.thriftInfo);
  }

  @Override
  public long getRecords() {
    return thriftInfo.getRecs();
  }

  @Override
  public long getRecordsInMemory() {
    return thriftInfo.getRecsInMemory();
  }

  @Override
  public int getTabletCount() {
    return thriftInfo.getTablets();
  }

  @Override
  public int getOnlineTabletCount() {
    return thriftInfo.getOnlineTablets();
  }

  /** Updates the online tablet count on the underlying thrift object. */
  public void setOnlineTabletCount(int onlineTabletCount) {
    thriftInfo.setOnlineTablets(onlineTabletCount);
  }

  @Override
  public double getIngestRate() {
    return thriftInfo.getIngestRate();
  }

  @Override
  public double getIngestByteRate() {
    return thriftInfo.getIngestByteRate();
  }

  @Override
  public double getQueryRate() {
    return thriftInfo.getQueryRate();
  }

  @Override
  public double getQueryByteRate() {
    return thriftInfo.getQueryByteRate();
  }

  @Override
  public double getScanRate() {
    return thriftInfo.getScanRate();
  }

  @Override
  public int compareTo(TableStatistics o) {
    return thriftInfo.compareTo(((TableStatisticsImpl) o).thriftInfo);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    return thriftInfo.equals(((TableStatisticsImpl) o).thriftInfo);
  }

  @Override
  public int hashCode() {
    return thriftInfo.hashCode();
  }

  @Override
  public String toString() {
    return thriftInfo.toString();
  }
}
| 9,740 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/balancer/TabletStatisticsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.balancer;
import static java.util.Objects.requireNonNull;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
/**
 * {@link TabletStatistics} implementation backed by a thrift {@link TabletStats}.
 */
public class TabletStatisticsImpl implements TabletStatistics {
  private final TabletStats thriftStats;
  private final TabletId tabletId;

  /**
   * Wraps the given thrift tablet stats; the tablet id is derived from the stats' extent.
   */
  public TabletStatisticsImpl(TabletStats thriftStats) {
    this.thriftStats = requireNonNull(thriftStats);
    KeyExtent extent = KeyExtent.fromThrift(thriftStats.getExtent());
    this.tabletId = new TabletIdImpl(extent);
  }

  @Override
  public TabletId getTabletId() {
    return tabletId;
  }

  @Override
  public long getNumEntries() {
    return thriftStats.getNumEntries();
  }

  @Override
  public long getSplitCreationTime() {
    return thriftStats.getSplitCreationTime();
  }

  @Override
  public double getIngestRate() {
    return thriftStats.getIngestRate();
  }

  @Override
  public double getQueryRate() {
    return thriftStats.getQueryRate();
  }

  @Override
  public int compareTo(TabletStatistics o) {
    return thriftStats.compareTo(((TabletStatisticsImpl) o).thriftStats);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    return thriftStats.equals(((TabletStatisticsImpl) o).thriftStats);
  }

  @Override
  public int hashCode() {
    return thriftStats.hashCode();
  }

  @Override
  public String toString() {
    return thriftStats.toString();
  }
}
| 9,741 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/balancer/BalanceParamsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.balancer;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import java.util.stream.Collectors;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.manager.thrift.TabletServerStatus;
import org.apache.accumulo.core.metadata.TServerInstance;
import org.apache.accumulo.core.spi.balancer.TabletBalancer;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
public class BalanceParamsImpl implements TabletBalancer.BalanceParameters {
private final SortedMap<TabletServerId,TServerStatus> currentStatus;
private final Set<TabletId> currentMigrations;
private final List<TabletMigration> migrationsOut;
private final SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus;
private final Set<KeyExtent> thriftCurrentMigrations;
public static BalanceParamsImpl fromThrift(SortedMap<TabletServerId,TServerStatus> currentStatus,
SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus,
Set<KeyExtent> thriftCurrentMigrations) {
Set<TabletId> currentMigrations = thriftCurrentMigrations.stream().map(TabletIdImpl::new)
.collect(Collectors.toUnmodifiableSet());
return new BalanceParamsImpl(currentStatus, currentMigrations, new ArrayList<>(),
thriftCurrentStatus, thriftCurrentMigrations);
}
public BalanceParamsImpl(SortedMap<TabletServerId,TServerStatus> currentStatus,
Set<TabletId> currentMigrations, List<TabletMigration> migrationsOut) {
this.currentStatus = currentStatus;
this.currentMigrations = currentMigrations;
this.migrationsOut = migrationsOut;
this.thriftCurrentStatus = null;
this.thriftCurrentMigrations = null;
}
private BalanceParamsImpl(SortedMap<TabletServerId,TServerStatus> currentStatus,
Set<TabletId> currentMigrations, List<TabletMigration> migrationsOut,
SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus,
Set<KeyExtent> thriftCurrentMigrations) {
this.currentStatus = currentStatus;
this.currentMigrations = currentMigrations;
this.migrationsOut = migrationsOut;
this.thriftCurrentStatus = thriftCurrentStatus;
this.thriftCurrentMigrations = thriftCurrentMigrations;
}
@Override
public SortedMap<TabletServerId,TServerStatus> currentStatus() {
return currentStatus;
}
@Override
public Set<TabletId> currentMigrations() {
return currentMigrations;
}
@Override
public List<TabletMigration> migrationsOut() {
return migrationsOut;
}
public SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus() {
return thriftCurrentStatus;
}
public Set<KeyExtent> thriftCurrentMigrations() {
return thriftCurrentMigrations;
}
public void addMigration(KeyExtent extent, TServerInstance oldServer, TServerInstance newServer) {
TabletId id = new TabletIdImpl(extent);
TabletServerId oldTsid = new TabletServerIdImpl(oldServer);
TabletServerId newTsid = new TabletServerIdImpl(newServer);
migrationsOut.add(new TabletMigration(id, oldTsid, newTsid));
}
}
| 9,742 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/balancer/TabletServerIdImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.balancer;
import static java.util.Objects.requireNonNull;
import org.apache.accumulo.core.metadata.TServerInstance;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import com.google.common.net.HostAndPort;
/**
* @since 2.1.0
*/
/**
 * {@link TabletServerId} implementation that wraps a {@link TServerInstance}.
 *
 * @since 2.1.0
 */
public class TabletServerIdImpl implements TabletServerId {
  private final TServerInstance tServerInstance;

  /** Wraps the given instance, or returns null when {@code tsi} is null. */
  public static TabletServerIdImpl fromThrift(TServerInstance tsi) {
    if (tsi == null) {
      return null;
    }
    return new TabletServerIdImpl(tsi);
  }

  public TabletServerIdImpl(String host, int port, String session) {
    requireNonNull(host);
    this.tServerInstance = new TServerInstance(HostAndPort.fromParts(host, port), session);
  }

  public TabletServerIdImpl(TServerInstance tServerInstance) {
    this.tServerInstance = requireNonNull(tServerInstance);
  }

  @Override
  public String getHost() {
    return tServerInstance.getHostAndPort().getHost();
  }

  @Override
  public int getPort() {
    return tServerInstance.getHostAndPort().getPort();
  }

  @Override
  public String getSession() {
    return tServerInstance.getSession();
  }

  @Override
  public int compareTo(TabletServerId o) {
    return tServerInstance.compareTo(((TabletServerIdImpl) o).tServerInstance);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    return tServerInstance.equals(((TabletServerIdImpl) o).tServerInstance);
  }

  @Override
  public int hashCode() {
    return tServerInstance.hashCode();
  }

  @Override
  public String toString() {
    // renders as host:port[session]
    return new StringBuilder(getHost()).append(':').append(getPort()).append('[')
        .append(getSession()).append(']').toString();
  }

  /** Returns the wrapped thrift instance. */
  public TServerInstance toThrift() {
    return tServerInstance;
  }

  /** Converts any {@link TabletServerId} to a thrift instance, unwrapping when possible. */
  public static TServerInstance toThrift(TabletServerId tabletServerId) {
    if (tabletServerId instanceof TabletServerIdImpl) {
      return ((TabletServerIdImpl) tabletServerId).toThrift();
    }
    return new TServerInstance(
        HostAndPort.fromParts(tabletServerId.getHost(), tabletServerId.getPort()),
        tabletServerId.getSession());
  }
}
| 9,743 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/balancer/AssignmentParamsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.balancer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.manager.thrift.TabletServerStatus;
import org.apache.accumulo.core.metadata.TServerInstance;
import org.apache.accumulo.core.spi.balancer.TabletBalancer;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
public class AssignmentParamsImpl implements TabletBalancer.AssignmentParameters {
private final SortedMap<TabletServerId,TServerStatus> currentStatus;
private final Map<TabletId,TabletServerId> unassigned;
private final Map<TabletId,TabletServerId> assignmentsOut;
private final SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus;
private final Map<KeyExtent,TServerInstance> thriftUnassigned;
private final Map<KeyExtent,TServerInstance> thriftAssignmentsOut;
public static AssignmentParamsImpl fromThrift(
SortedMap<TServerInstance,TabletServerStatus> currentStatus,
Map<KeyExtent,TServerInstance> unassigned, Map<KeyExtent,TServerInstance> assignmentsOut) {
SortedMap<TabletServerId,TServerStatus> currentStatusNew = new TreeMap<>();
currentStatus.forEach((tsi, status) -> currentStatusNew.put(new TabletServerIdImpl(tsi),
TServerStatusImpl.fromThrift(status)));
Map<TabletId,TabletServerId> unassignedNew = new HashMap<>();
unassigned.forEach(
(ke, tsi) -> unassignedNew.put(new TabletIdImpl(ke), TabletServerIdImpl.fromThrift(tsi)));
return new AssignmentParamsImpl(Collections.unmodifiableSortedMap(currentStatusNew),
Collections.unmodifiableMap(unassignedNew), currentStatus, unassigned, assignmentsOut);
}
public AssignmentParamsImpl(SortedMap<TabletServerId,TServerStatus> currentStatus,
Map<TabletId,TabletServerId> unassigned, Map<TabletId,TabletServerId> assignmentsOut) {
this.currentStatus = currentStatus;
this.unassigned = unassigned;
this.assignmentsOut = assignmentsOut;
this.thriftCurrentStatus = null;
this.thriftUnassigned = null;
this.thriftAssignmentsOut = null;
}
private AssignmentParamsImpl(SortedMap<TabletServerId,TServerStatus> currentStatus,
Map<TabletId,TabletServerId> unassigned,
SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus,
Map<KeyExtent,TServerInstance> thriftUnassigned,
Map<KeyExtent,TServerInstance> thriftAssignmentsOut) {
this.currentStatus = currentStatus;
this.unassigned = unassigned;
this.assignmentsOut = null;
this.thriftCurrentStatus = thriftCurrentStatus;
this.thriftUnassigned = thriftUnassigned;
this.thriftAssignmentsOut = thriftAssignmentsOut;
}
@Override
public SortedMap<TabletServerId,TServerStatus> currentStatus() {
return currentStatus;
}
@Override
public Map<TabletId,TabletServerId> unassignedTablets() {
return unassigned;
}
@Override
public void addAssignment(TabletId tabletId, TabletServerId tabletServerId) {
if (assignmentsOut != null) {
assignmentsOut.put(tabletId, tabletServerId);
}
if (thriftAssignmentsOut != null) {
thriftAssignmentsOut.put(KeyExtent.fromTabletId(tabletId),
TabletServerIdImpl.toThrift(tabletServerId));
}
}
public SortedMap<TServerInstance,TabletServerStatus> thriftCurrentStatus() {
return thriftCurrentStatus;
}
public Map<KeyExtent,TServerInstance> thriftUnassigned() {
return thriftUnassigned;
}
public Map<KeyExtent,TServerInstance> thriftAssignmentsOut() {
return thriftAssignmentsOut;
}
}
| 9,744 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/manager/balancer/TServerStatusImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.manager.balancer;
import static java.util.Objects.requireNonNull;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.manager.thrift.TabletServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TableStatistics;
/**
 * SPI-level view of a tablet server's status, delegating to a thrift
 * {@link TabletServerStatus} and converting its table map to {@link TableStatistics}.
 */
public class TServerStatusImpl implements TServerStatus {

  // Underlying thrift status; all scalar getters delegate to it.
  private final TabletServerStatus status;
  // Converted table statistics; mutable because setTableMap() may replace it.
  private Map<String,TableStatistics> tableStats;

  /**
   * Wraps the given thrift status; returns null when the input is null.
   */
  public static TServerStatusImpl fromThrift(TabletServerStatus tss) {
    if (tss == null) {
      return null;
    }
    return new TServerStatusImpl(tss);
  }

  public TServerStatusImpl(TabletServerStatus thriftStatus) {
    this.status = requireNonNull(thriftStatus);
    var thriftTables = thriftStatus.getTableMap();
    if (thriftTables == null) {
      this.tableStats = null;
    } else {
      var converted = new HashMap<String,TableStatistics>();
      for (var entry : thriftTables.entrySet()) {
        converted.put(entry.getKey(), TableStatisticsImpl.fromThrift(entry.getValue()));
      }
      this.tableStats = converted;
    }
  }

  @Override
  public Map<String,TableStatistics> getTableMap() {
    return tableStats;
  }

  public void setTableMap(Map<String,TableStatistics> tableInfoMap) {
    this.tableStats = tableInfoMap;
  }

  @Override
  public long getLastContact() {
    return status.getLastContact();
  }

  @Override
  public String getName() {
    return status.getName();
  }

  @Override
  public double getOsLoad() {
    return status.getOsLoad();
  }

  @Override
  public long getHoldTime() {
    return status.getHoldTime();
  }

  @Override
  public long getLookups() {
    return status.getLookups();
  }

  @Override
  public long getIndexCacheHits() {
    return status.getIndexCacheHits();
  }

  @Override
  public long getIndexCacheRequests() {
    return status.getIndexCacheRequest();
  }

  @Override
  public long getDataCacheHits() {
    return status.getDataCacheHits();
  }

  @Override
  public long getDataCacheRequests() {
    return status.getDataCacheRequest();
  }

  @Override
  public long getFlushes() {
    // Delegates to the thrift getter, whose generated name is spelled "getFlushs".
    return status.getFlushs();
  }

  @Override
  public long getSyncs() {
    return status.getSyncs();
  }

  @Override
  public String getVersion() {
    return status.getVersion();
  }

  @Override
  public long getResponseTime() {
    return status.getResponseTime();
  }

  @Override
  public int compareTo(TServerStatus o) {
    // Ordering delegates to the thrift status; casts, so other implementations fail with CCE.
    TServerStatusImpl other = (TServerStatusImpl) o;
    return status.compareTo(other.status);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    return o != null && getClass() == o.getClass()
        && status.equals(((TServerStatusImpl) o).status);
  }

  @Override
  public int hashCode() {
    return status.hashCode();
  }

  @Override
  public String toString() {
    return status.toString();
  }

  /**
   * @return the wrapped thrift-level status
   */
  public TabletServerStatus toThrift() {
    return status;
  }
}
| 9,745 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package provides a place for plugin interfaces intended for Accumulo users to implement. The
* types under this package should adhere to the following rules.
*
* <ul>
* <li>No changes should be made in a bug fix release.
* <li>Any changes can be made in a minor or major version. Incompatible changes should only be made
* if there is a benefit to users that outweighs the negative impact to users. If possible use
* deprecation instead of making incompatible changes.
* <li>All types used within this package should be declared in Accumulo's public API or under this
* package. This rule makes it possible to achieve the other rules. Accumulo's build uses Apilyzer
* to check this rule.
* <li>Types under this package are intended for Accumulo users. If a type is only intended to be
* used internally by Accumulo, it should not be placed here.
* </ul>
*
* <p>
* There are no hard and fast rules for a developer trying to decide if something should go into
* this package, Accumulo's public API, or outside of both. If it can follow the rules then its
* eligible for placement here. Below are some reasons things have or have not been placed here in
* the past.
*
* <ul>
* <li>Scan executors and cache plugins were placed here because they are tightly coupled to
* Accumulo's scan execution model. If the execution model for scans is changed, incompatible
* changes may have to be made. Trying to support a deprecation cycle may mean having to support a
* new and old scan execution model in a single release, which may be impractical. Also these
* plugins never impact users data or query results, they only impact performance via table
* configuration.
 * <li>Crypto was placed here because it is experimental and subject to change.
* <li>Iterators are server side plugins, but were placed into Accumulo's public API instead of here
* because they are so tightly coupled to users data model. Iterators can change the data returned
* by a scan. The stricter rules of the API respect this tight coupling with users data model.
* </ul>
*
* <p>
* Before this package was created many plugin interfaces were created for Accumulo. These plugin
* interfaces used internal Accumulo types, which transitively used other internal types. This
* undisciplined use of any types made it impractical to reason about, analyze, or make any
* guarantees about plugin stability. This package was created to solve that problem. Hopefully
* existing plugins (like the balancer) can be migrated to this package.
*/
package org.apache.accumulo.core.spi;
| 9,746 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/PerTableCryptoServiceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import static java.util.Objects.requireNonNull;
import static org.apache.accumulo.core.conf.Property.GENERAL_ARBITRARY_PROP_PREFIX;
import static org.apache.accumulo.core.conf.Property.TABLE_CRYPTO_PREFIX;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.accumulo.core.data.TableId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A factory that loads and caches a {@link CryptoService} per {@link TableId}, with separate
 * cached instances (under fake table ids) for the WAL and RECOVERY scopes.
 */
public class PerTableCryptoServiceFactory implements CryptoServiceFactory {
  private static final Logger log = LoggerFactory.getLogger(PerTableCryptoServiceFactory.class);

  // Cache of initialized services, keyed by table id (or a fake id for WAL/recovery scopes).
  private final ConcurrentHashMap<TableId,CryptoService> cryptoServiceMap =
      new ConcurrentHashMap<>();

  public static final String WAL_NAME_PROP = GENERAL_ARBITRARY_PROP_PREFIX + "crypto.wal.service";
  public static final String RECOVERY_NAME_PROP =
      GENERAL_ARBITRARY_PROP_PREFIX + "crypto.recovery.service";
  public static final String TABLE_SERVICE_NAME_PROP = TABLE_CRYPTO_PREFIX + "service";

  // The WALs do not have table IDs so these fake IDs are used for caching
  private static final TableId WAL_FAKE_ID = TableId.of("WAL_CryptoService_FAKE_ID");
  private static final TableId REC_FAKE_ID = TableId.of("RECOVERY_CryptoService_FAKE_ID");

  /**
   * Returns the cached service for the environment's scope (and table, for TABLE scope),
   * creating and initializing it on first use. Returns a no-op service when no table id or no
   * table crypto configuration is present.
   */
  @Override
  public CryptoService getService(CryptoEnvironment environment, Map<String,String> props) {
    if (environment.getScope() == CryptoEnvironment.Scope.WAL) {
      return cryptoServiceMap.computeIfAbsent(WAL_FAKE_ID, (id) -> getWALServiceInitialized(props));
    } else if (environment.getScope() == CryptoEnvironment.Scope.RECOVERY) {
      return cryptoServiceMap.computeIfAbsent(REC_FAKE_ID,
          (id) -> getRecoveryServiceInitialized(props));
    } else {
      if (environment.getTableId().isEmpty()) {
        // Use parameterized logging so the message is only built when debug is enabled
        log.debug("No tableId present in crypto env: {}", environment);
        return NoCryptoServiceFactory.NONE;
      }
      TableId tableId = environment.getTableId().orElseThrow();
      if (props == null || props.isEmpty() || props.get(TABLE_SERVICE_NAME_PROP) == null) {
        return NoCryptoServiceFactory.NONE;
      }
      if (environment.getScope() == CryptoEnvironment.Scope.TABLE) {
        return cryptoServiceMap.computeIfAbsent(tableId,
            (id) -> getTableServiceInitialized(tableId, props));
      }
    }
    throw new IllegalStateException("Invalid config for crypto " + environment + " " + props);
  }

  // Instantiates the named service and initializes it with the given properties.
  // Shared by all three scope-specific factory methods below.
  private CryptoService newInitializedService(String name, Map<String,String> props) {
    CryptoService cs = newCryptoService(name);
    cs.init(props);
    return cs;
  }

  private CryptoService getWALServiceInitialized(Map<String,String> props) {
    String name = requireNonNull(props.get(WAL_NAME_PROP),
        "The property " + WAL_NAME_PROP + " is required for encrypting WALs.");
    log.debug("New CryptoService for WAL scope {}={}", WAL_NAME_PROP, name);
    return newInitializedService(name, props);
  }

  private CryptoService getRecoveryServiceInitialized(Map<String,String> props) {
    String name = requireNonNull(props.get(RECOVERY_NAME_PROP),
        "The property " + RECOVERY_NAME_PROP + " is required for encrypting during recovery.");
    log.debug("New CryptoService for Recovery scope {}={}", RECOVERY_NAME_PROP, name);
    return newInitializedService(name, props);
  }

  private CryptoService getTableServiceInitialized(TableId tableId, Map<String,String> props) {
    String name = requireNonNull(props.get(TABLE_SERVICE_NAME_PROP),
        "The property " + TABLE_SERVICE_NAME_PROP + " is required for encrypting tables.");
    log.debug("New CryptoService for TABLE({}) {}={}", tableId, TABLE_SERVICE_NAME_PROP, name);
    return newInitializedService(name, props);
  }

  /**
   * @return the number of cached crypto service instances
   */
  public int getCount() {
    return cryptoServiceMap.size();
  }
}
| 9,747 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/NoCryptoServiceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.util.Map;
/**
 * A {@link CryptoServiceFactory} that always returns a no-op crypto service, regardless of the
 * environment or configuration provided.
 */
public class NoCryptoServiceFactory implements CryptoServiceFactory {
  // Shared no-op service instance returned for every request.
  public static final CryptoService NONE = new NoCryptoService();
  @Override
  public CryptoService getService(CryptoEnvironment env, Map<String,String> properties) {
    // Both arguments are ignored; the same no-op service is always returned.
    return NONE;
  }
}
| 9,748 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/CryptoEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.util.Optional;
import org.apache.accumulo.core.data.TableId;
/**
 * Useful information provided to the crypto implementation
 *
 * @since 2.0
 */
public interface CryptoEnvironment {
  /**
   * Where in Accumulo the on-disk file encryption takes place.
   */
  enum Scope {
    WAL, TABLE, RECOVERY
  }
  /**
   * @return the scope (WAL, TABLE, or RECOVERY) this crypto operation applies to
   */
  Scope getScope();
  /**
   * If in the TABLE scope, get the tableId. Will be empty in WAL scope.
   *
   * @return the table id, when one applies to this operation
   */
  Optional<TableId> getTableId();
  /**
   * If decrypting files, get the params read from the file. Will be empty if encrypting.
   *
   * @return the decryption parameters previously written by a FileEncrypter, if present
   */
  Optional<byte[]> getDecryptionParams();
}
| 9,749 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/CryptoService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.util.Map;
/**
 * A self-contained cryptographic service. All on disk encryption and decryption will take place
 * through this interface. Each implementation must implement a {@link FileEncrypter} for encryption
 * and a {@link FileDecrypter} for decryption.
 *
 * @since 2.0
 * @see org.apache.accumulo.core.spi
 */
public interface CryptoService {
  /**
   * Initialize CryptoService. This should only be called once.
   *
   * @param conf configuration properties for this service
   * @throws CryptoException if initialization fails
   */
  void init(Map<String,String> conf) throws CryptoException;
  /**
   * Initialize the FileEncrypter for the environment and return. This will get called once per
   * R-File or Write Ahead Log. FileEncrypter implementation must be thread safe.
   *
   * @param environment details about the scope and target of the encryption
   * @return a thread-safe encrypter for the given environment
   */
  FileEncrypter getFileEncrypter(CryptoEnvironment environment);
  /**
   * Initialize the FileDecrypter for the environment and return. This will get called once per
   * R-File or Write Ahead Log. FileDecrypter implementation must be thread safe.
   *
   * @param environment details about the scope and decryption parameters read from the file
   * @return a thread-safe decrypter for the given environment
   */
  FileDecrypter getFileDecrypter(CryptoEnvironment environment);
  /**
   * Runtime Crypto exception
   */
  class CryptoException extends RuntimeException {
    private static final long serialVersionUID = -7588781060677839664L;
    public CryptoException() {}
    public CryptoException(String message) {
      super(message);
    }
    public CryptoException(String message, Throwable cause) {
      super(message, cause);
    }
    public CryptoException(Throwable cause) {
      super(cause);
    }
  }
}
| 9,750 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/FileEncrypter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.io.OutputStream;
/**
 * Class implementation that will encrypt a file. Make sure implementation is thread safe.
 *
 * @since 2.0
 */
public interface FileEncrypter {
  /**
   * Encrypt the OutputStream.
   *
   * @param outputStream the raw stream to wrap
   * @return a stream that encrypts everything written to it
   * @throws CryptoService.CryptoException if the encrypting stream cannot be created
   */
  OutputStream encryptStream(OutputStream outputStream) throws CryptoService.CryptoException;
  /**
   * Get all the parameters required for decryption. WARNING: This byte[] will get written as part
   * of the OutputStream as it is returned (either before or after the encrypted data). Do not
   * return any unencrypted sensitive information.
   *
   * For example, return information about the encryption taking place such as version, class name
   * or a wrapped File Encryption Key. This information will get written at the beginning of an
   * encrypted Write Ahead Log (WAL) or at the end of an encrypted R-File. Later, it will be read
   * from the file and passed to the {@link FileDecrypter} as part of {@link CryptoEnvironment} for
   * everything it needs for decryption.
   *
   * @return the opaque parameters a FileDecrypter will need to decrypt this file
   */
  byte[] getDecryptionParameters();
}
| 9,751 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/CryptoServiceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.util.Map;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
/**
* A Factory that returns a CryptoService based on the environment and configuration.
*
* @since 2.1
*/
public interface CryptoServiceFactory {
/**
* Return the appropriate CryptoService.
*
* @param environment CryptoEnvironment containing a variety of information
* @param properties configuration
*
* @return CryptoService based on the environment and configuration
*/
CryptoService getService(CryptoEnvironment environment, Map<String,String> properties);
/**
* Loads a crypto service based on the name provided.
*/
default CryptoService newCryptoService(String cryptoServiceName) {
try {
return ClassLoaderUtil.loadClass(null, cryptoServiceName, CryptoService.class)
.getDeclaredConstructor().newInstance();
} catch (ReflectiveOperationException e) {
throw new RuntimeException(e);
}
}
}
| 9,752 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/NoFileDecrypter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.io.InputStream;
/**
 * A pass-through {@link FileDecrypter} for files that require no decryption; streams are returned
 * unchanged.
 */
public class NoFileDecrypter implements FileDecrypter {
  @Override
  public InputStream decryptStream(InputStream inputStream) throws CryptoService.CryptoException {
    // Nothing was encrypted, so there is nothing to undo.
    return inputStream;
  }
}
| 9,753 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/AESCryptoService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.Key;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
import javax.crypto.CipherOutputStream;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.accumulo.core.crypto.streams.BlockedInputStream;
import org.apache.accumulo.core.crypto.streams.BlockedOutputStream;
import org.apache.accumulo.core.crypto.streams.DiscardCloseOutputStream;
import org.apache.accumulo.core.crypto.streams.RFileCipherOutputStream;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* Example implementation of AES encryption for Accumulo
*/
public class AESCryptoService implements CryptoService {
private static final Logger log = LoggerFactory.getLogger(AESCryptoService.class);
private volatile boolean initialized = false;
// properties required for using this service
public static final String KEY_URI_PROPERTY = "general.custom.crypto.key.uri";
// optional properties
// defaults to true
public static final String ENCRYPT_ENABLED_PROPERTY = "general.custom.crypto.enabled";
// Hard coded NoCryptoService.VERSION - this permits the removal of NoCryptoService from the
// core jar, allowing use of only one crypto service
private static final String NO_CRYPTO_VERSION = "U+1F47B";
private static final String URI = "uri";
private static final String KEY_WRAP_TRANSFORM = "AESWrap";
private Key encryptingKek = null;
private String keyLocation = null;
private String keyManager = null;
// Lets just load keks for reading once
private HashMap<String,Key> decryptingKeys = null;
private boolean encryptEnabled = true;
private static final FileEncrypter DISABLED = new NoFileEncrypter();
private static final ThreadLocal<Cipher> KEY_WRAP_CIPHER = new ThreadLocal<Cipher>() {
@SuppressFBWarnings(value = "CIPHER_INTEGRITY",
justification = "integrity not needed for key wrap")
@Override
protected Cipher initialValue() {
try {
return Cipher.getInstance(KEY_WRAP_TRANSFORM);
} catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
throw new CryptoException("Error creating Cipher for AESWrap", e);
}
}
};
private static final ThreadLocal<Cipher> KEY_UNWRAP_CIPHER = new ThreadLocal<Cipher>() {
@SuppressFBWarnings(value = "CIPHER_INTEGRITY",
justification = "integrity not needed for key wrap")
@Override
protected Cipher initialValue() {
try {
return Cipher.getInstance(KEY_WRAP_TRANSFORM);
} catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
throw new CryptoException("Error creating Cipher for AESWrap", e);
}
}
};
/**
 * Configures this service from the supplied properties. The KEK URI
 * ({@value #KEY_URI_PROPERTY}) is mandatory; encryption may optionally be disabled via
 * {@value #ENCRYPT_ENABLED_PROPERTY}.
 *
 * @throws NullPointerException if the key URI property is missing or the KEK fails to load
 * @throws IllegalStateException if this service was already initialized
 */
@Override
public void init(Map<String,String> conf) throws CryptoException {
  ensureNotInit();
  // Fail fast with a descriptive message when the mandatory key URI is absent.
  String kekUri = Objects.requireNonNull(conf.get(KEY_URI_PROPERTY),
      "Config property " + KEY_URI_PROPERTY + " is required.");
  String enabledProp = conf.get(ENCRYPT_ENABLED_PROPERTY);
  if (enabledProp != null) {
    encryptEnabled = Boolean.parseBoolean(enabledProp);
  }
  // get key from URI for now, keyMgr framework could be expanded on in the future
  String keyMgr = "uri";
  this.decryptingKeys = new HashMap<>();
  if (URI.equals(keyMgr)) {
    this.keyManager = keyMgr;
    this.keyLocation = kekUri;
    this.encryptingKek = loadKekFromUri(kekUri);
  } else {
    throw new CryptoException("Unrecognized key manager");
  }
  Objects.requireNonNull(this.encryptingKek,
      "Encrypting Key Encryption Key was null, init failed");
  log.debug("Successfully initialized crypto Key from {}", kekUri);
  initialized = true;
}
/**
 * Returns an encrypter for the given scope: CBC for write-ahead logs, GCM for table files. When
 * encryption has been disabled via configuration, a pass-through encrypter is returned.
 */
@Override
public FileEncrypter getFileEncrypter(CryptoEnvironment environment) {
  ensureInit();
  if (!encryptEnabled) {
    return DISABLED;
  }
  switch (environment.getScope()) {
    case WAL:
      return new AESCBCCryptoModule(this.encryptingKek, this.keyLocation, this.keyManager)
          .getEncrypter();
    case TABLE:
      return new AESGCMCryptoModule(this.encryptingKek, this.keyLocation, this.keyManager)
          .getEncrypter();
    default:
      throw new CryptoException("Unknown scope: " + environment.getScope());
  }
}
/**
 * Builds a decrypter from the decryption parameters carried by the file: parses out the module
 * version and KEK id, loads the matching KEK, and unwraps the file encryption key. Files written
 * without encryption get a pass-through decrypter.
 */
@Override
public FileDecrypter getFileDecrypter(CryptoEnvironment environment) {
  ensureInit();
  var decryptionParams = environment.getDecryptionParams();
  // No parameters, or the no-crypto marker, means the file was written unencrypted.
  if (decryptionParams.isEmpty() || checkNoCrypto(decryptionParams.orElseThrow())) {
    return new NoFileDecrypter();
  }
  ParsedCryptoParameters parsed = parseCryptoParameters(decryptionParams.orElseThrow());
  Key fek = unwrapKey(parsed.getEncFek(), loadDecryptionKek(parsed));
  String version = parsed.getCryptoServiceVersion();
  switch (version) {
    case AESCBCCryptoModule.VERSION:
      return new AESCBCCryptoModule(this.encryptingKek, this.keyLocation, this.keyManager)
          .getDecrypter(fek);
    case AESGCMCryptoModule.VERSION:
      return new AESGCMCryptoModule(this.encryptingKek, this.keyLocation, this.keyManager)
          .getDecrypter(fek);
    default:
      throw new CryptoException("Unknown crypto module version: " + version);
  }
}
/** Returns true if the given decryption parameters are exactly the no-crypto marker. */
private static boolean checkNoCrypto(byte[] params) {
  return Arrays.equals(params, NO_CRYPTO_VERSION.getBytes(UTF_8));
}
/**
 * Holder for the fields serialized into a file's decryption parameters: which crypto service and
 * module version wrote the file, which key manager and KEK id are needed to read it, and the
 * wrapped (KEK-encrypted) file encryption key.
 */
static class ParsedCryptoParameters {
  String cryptoServiceName;
  String cryptoServiceVersion;
  String keyManagerVersion;
  String kekId;
  // File encryption key, still wrapped (encrypted) with the KEK.
  byte[] encFek;

  public void setCryptoServiceName(String cryptoServiceName) {
    this.cryptoServiceName = cryptoServiceName;
  }

  public String getCryptoServiceVersion() {
    return cryptoServiceVersion;
  }

  public void setCryptoServiceVersion(String cryptoServiceVersion) {
    this.cryptoServiceVersion = cryptoServiceVersion;
  }

  public String getKeyManagerVersion() {
    return keyManagerVersion;
  }

  public void setKeyManagerVersion(String keyManagerVersion) {
    this.keyManagerVersion = keyManagerVersion;
  }

  public String getKekId() {
    return kekId;
  }

  public void setKekId(String kekId) {
    this.kekId = kekId;
  }

  public byte[] getEncFek() {
    return encFek;
  }

  public void setEncFek(byte[] encFek) {
    this.encFek = encFek;
  }
}
/**
 * Serializes the decryption parameters stored with each encrypted file. Layout: service class
 * name, module version, key manager, KEK id, then the length-prefixed wrapped FEK. Must stay in
 * sync with {@code parseCryptoParameters}.
 */
private static byte[] createCryptoParameters(String version, Key encryptingKek,
    String encryptingKekId, String encryptingKeyManager, Key fek) {
  try (ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      DataOutputStream out = new DataOutputStream(bytes)) {
    out.writeUTF(AESCryptoService.class.getName());
    out.writeUTF(version);
    out.writeUTF(encryptingKeyManager);
    out.writeUTF(encryptingKekId);
    // The FEK is never stored in the clear; wrap it with the KEK first.
    byte[] wrappedFek = wrapKey(fek, encryptingKek);
    out.writeInt(wrappedFek.length);
    out.write(wrappedFek);
    out.flush();
    return bytes.toByteArray();
  } catch (IOException e) {
    throw new CryptoException("Error creating crypto params", e);
  }
}
/**
 * Deserializes the decryption parameters written by {@code createCryptoParameters}.
 *
 * @throws CryptoException if the parameters are truncated or otherwise unreadable
 */
private static ParsedCryptoParameters parseCryptoParameters(byte[] parameters) {
  ParsedCryptoParameters parsed = new ParsedCryptoParameters();
  try (ByteArrayInputStream bais = new ByteArrayInputStream(parameters);
      DataInputStream params = new DataInputStream(bais)) {
    parsed.setCryptoServiceName(params.readUTF());
    parsed.setCryptoServiceVersion(params.readUTF());
    parsed.setKeyManagerVersion(params.readUTF());
    parsed.setKekId(params.readUTF());
    int encFekLen = params.readInt();
    byte[] encFek = new byte[encFekLen];
    // readFully loops until encFekLen bytes are consumed (throwing EOFException on truncation);
    // a single read() is permitted to return fewer bytes than requested.
    params.readFully(encFek);
    parsed.setEncFek(encFek);
  } catch (IOException e) {
    throw new CryptoException("Error creating crypto params", e);
  }
  return parsed;
}
/**
 * Returns the KEK needed to decrypt a file, loading and caching it on first use. Keys are cached
 * in {@code decryptingKeys} under "{@code <keyManagerVersion>!<kekId>}".
 *
 * @throws CryptoException if the key manager is unrecognized or the KEK cannot be loaded
 */
private Key loadDecryptionKek(ParsedCryptoParameters params) {
  String keyTag = params.getKeyManagerVersion() + "!" + params.getKekId();
  // Single lookup instead of a contains+get pair.
  Key cached = this.decryptingKeys.get(keyTag);
  if (cached != null) {
    return cached;
  }
  final Key loaded;
  switch (params.keyManagerVersion) {
    case URI:
      loaded = loadKekFromUri(params.kekId);
      break;
    default:
      throw new CryptoException("Unable to load kek: " + params.kekId);
  }
  // Validate before caching so a null key can never be stored and returned later.
  if (loaded == null) {
    throw new CryptoException("Unable to load decryption KEK");
  }
  this.decryptingKeys.put(keyTag, loaded);
  return loaded;
}
/**
 * This interface lists the methods needed by CryptoModules which are responsible for tracking
 * version and preparing encrypters/decrypters for use.
 */
private interface CryptoModule {
  /** Returns an encrypter backed by a freshly generated file encryption key. */
  FileEncrypter getEncrypter();

  /** Returns a decrypter that uses the supplied (already unwrapped) file encryption key. */
  FileDecrypter getDecrypter(Key fek);
}
/**
 * Crypto module using the AES/GCM transform; selected for TABLE scope files. Each encrypter
 * generates its own file encryption key (FEK) and a random starting IV that is incremented for
 * every block written.
 */
public class AESGCMCryptoModule implements CryptoModule {
  private static final String VERSION = "U+1F43B"; // unicode bear emoji rawr

  private final Integer GCM_IV_LENGTH_IN_BYTES = 12;
  private final Integer KEY_LENGTH_IN_BYTES = 16;
  // 128-bit tags are the longest available for GCM
  private final Integer GCM_TAG_LENGTH_IN_BITS = 16 * 8;
  private final String transformation = "AES/GCM/NoPadding";
  // Set once the IV counter wraps back to its starting value; further encryption is refused.
  private boolean ivReused = false;
  private final Key encryptingKek;
  private final String keyLocation;
  private final String keyManager;

  public AESGCMCryptoModule(Key encryptingKek, String keyLocation, String keyManager) {
    this.encryptingKek = encryptingKek;
    this.keyLocation = keyLocation;
    this.keyManager = keyManager;
  }

  @Override
  public FileEncrypter getEncrypter() {
    return new AESGCMFileEncrypter();
  }

  @Override
  public FileDecrypter getDecrypter(Key fek) {
    return new AESGCMFileDecrypter(fek);
  }

  public class AESGCMFileEncrypter implements FileEncrypter {

    // IV value used for the first block; kept to detect counter wrap-around.
    private final byte[] firstInitVector;
    // File encryption key, freshly generated per encrypter.
    private final Key fek;
    private final byte[] initVector = new byte[GCM_IV_LENGTH_IN_BYTES];
    private final Cipher cipher;
    private final byte[] decryptionParameters;

    AESGCMFileEncrypter() {
      try {
        cipher = Cipher.getInstance(transformation);
      } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
        throw new CryptoException("Error obtaining cipher for transform " + transformation, e);
      }
      this.fek = generateKey(RANDOM.get(), KEY_LENGTH_IN_BYTES);
      RANDOM.get().nextBytes(this.initVector);
      this.firstInitVector = Arrays.copyOf(this.initVector, this.initVector.length);
      // Serialize (with the FEK wrapped by the KEK) everything a reader needs to decrypt.
      this.decryptionParameters =
          createCryptoParameters(VERSION, encryptingKek, keyLocation, keyManager, fek);
    }

    @Override
    public OutputStream encryptStream(OutputStream outputStream) throws CryptoException {
      if (ivReused) {
        throw new CryptoException(
            "Key/IV reuse is forbidden in AESGCMCryptoModule. Too many RBlocks.");
      }
      incrementIV(initVector, initVector.length - 1);
      if (Arrays.equals(initVector, firstInitVector)) {
        ivReused = true; // This will allow us to write the final block, since the
        // initialization vector is always incremented before use.
      }
      // write IV before encrypting
      try {
        outputStream.write(initVector);
      } catch (IOException e) {
        throw new CryptoException("Unable to write IV to stream", e);
      }
      try {
        cipher.init(Cipher.ENCRYPT_MODE, fek,
            new GCMParameterSpec(GCM_TAG_LENGTH_IN_BITS, initVector));
      } catch (InvalidKeyException | InvalidAlgorithmParameterException e) {
        throw new CryptoException("Unable to initialize cipher", e);
      }
      RFileCipherOutputStream cos =
          new RFileCipherOutputStream(new DiscardCloseOutputStream(outputStream), cipher);
      // Prevent underlying stream from being closed with DiscardCloseOutputStream
      // Without this, when the crypto stream is closed (in order to flush its last bytes)
      // the underlying RFile stream will *also* be closed, and that's undesirable as the
      // cipher stream is closed for every block written.
      return new BlockedOutputStream(cos, cipher.getBlockSize(), 1024);
    }

    /**
     * Because IVs can be longer than longs, this increments arbitrarily sized byte arrays by 1,
     * with a roll over to 0 after the max value is reached.
     *
     * @param iv The iv to be incremented
     * @param i The current byte being incremented
     */
    void incrementIV(byte[] iv, int i) {
      iv[i]++;
      if (iv[i] == 0) {
        if (i == 0) {
          return;
        } else {
          // Carry into the next-higher-order byte.
          incrementIV(iv, i - 1);
        }
      }
    }

    @Override
    public byte[] getDecryptionParameters() {
      return decryptionParameters;
    }
  }

  public class AESGCMFileDecrypter implements FileDecrypter {
    private final Cipher cipher;
    private final Key fek;

    AESGCMFileDecrypter(Key fek) {
      try {
        cipher = Cipher.getInstance(transformation);
      } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
        throw new CryptoException("Error obtaining cipher for transform " + transformation, e);
      }
      this.fek = fek;
    }

    @Override
    public InputStream decryptStream(InputStream inputStream) throws CryptoException {
      // The encrypter wrote the IV ahead of the ciphertext; read it back first.
      byte[] initVector = new byte[GCM_IV_LENGTH_IN_BYTES];
      try {
        IOUtils.readFully(inputStream, initVector);
      } catch (IOException e) {
        throw new CryptoException("Unable to read IV from stream", e);
      }
      try {
        cipher.init(Cipher.DECRYPT_MODE, fek,
            new GCMParameterSpec(GCM_TAG_LENGTH_IN_BITS, initVector));
      } catch (InvalidKeyException | InvalidAlgorithmParameterException e) {
        throw new CryptoException("Unable to initialize cipher", e);
      }
      CipherInputStream cis = new CipherInputStream(inputStream, cipher);
      return new BlockedInputStream(cis, cipher.getBlockSize(), 1024);
    }
  }
}
/**
 * Crypto module using the AES/CBC transform; selected for WAL scope. Each encrypter generates its
 * own file encryption key (FEK) and writes a fresh random IV ahead of every encrypted stream.
 */
public class AESCBCCryptoModule implements CryptoModule {
  public static final String VERSION = "U+1f600"; // unicode grinning face emoji
  private final Integer IV_LENGTH_IN_BYTES = 16;
  private final Integer KEY_LENGTH_IN_BYTES = 16;
  private final String transformation = "AES/CBC/NoPadding";
  private final Key encryptingKek;
  private final String keyLocation;
  private final String keyManager;

  public AESCBCCryptoModule(Key encryptingKek, String keyLocation, String keyManager) {
    this.encryptingKek = encryptingKek;
    this.keyLocation = keyLocation;
    this.keyManager = keyManager;
  }

  @Override
  public FileEncrypter getEncrypter() {
    return new AESCBCFileEncrypter();
  }

  @Override
  public FileDecrypter getDecrypter(Key fek) {
    return new AESCBCFileDecrypter(fek);
  }

  @SuppressFBWarnings(value = "CIPHER_INTEGRITY", justification = "CBC is provided for WALs")
  public class AESCBCFileEncrypter implements FileEncrypter {

    private final Cipher cipher;
    // File encryption key, freshly generated per encrypter.
    private final Key fek;
    private final byte[] initVector = new byte[IV_LENGTH_IN_BYTES];
    private final byte[] decryptionParameters;

    AESCBCFileEncrypter() {
      try {
        cipher = Cipher.getInstance(transformation);
      } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
        throw new CryptoException("Error obtaining cipher for transform " + transformation, e);
      }
      this.fek = generateKey(RANDOM.get(), KEY_LENGTH_IN_BYTES);
      // Serialize (with the FEK wrapped by the KEK) everything a reader needs to decrypt.
      this.decryptionParameters =
          createCryptoParameters(VERSION, encryptingKek, keyLocation, keyManager, fek);
    }

    @Override
    public OutputStream encryptStream(OutputStream outputStream) throws CryptoException {
      // A new random IV is generated for each stream and written before the ciphertext.
      RANDOM.get().nextBytes(initVector);
      try {
        outputStream.write(initVector);
      } catch (IOException e) {
        throw new CryptoException("Unable to write IV to stream", e);
      }
      try {
        cipher.init(Cipher.ENCRYPT_MODE, fek, new IvParameterSpec(initVector));
      } catch (InvalidKeyException | InvalidAlgorithmParameterException e) {
        throw new CryptoException("Unable to initialize cipher", e);
      }
      CipherOutputStream cos = new CipherOutputStream(outputStream, cipher);
      return new BlockedOutputStream(cos, cipher.getBlockSize(), 1024);
    }

    @Override
    public byte[] getDecryptionParameters() {
      return decryptionParameters;
    }
  }

  @SuppressFBWarnings(value = "CIPHER_INTEGRITY", justification = "CBC is provided for WALs")
  public class AESCBCFileDecrypter implements FileDecrypter {
    private final Cipher cipher;
    private final Key fek;

    AESCBCFileDecrypter(Key fek) {
      try {
        cipher = Cipher.getInstance(transformation);
      } catch (NoSuchAlgorithmException | NoSuchPaddingException e) {
        throw new CryptoException("Error obtaining cipher for transform " + transformation, e);
      }
      this.fek = fek;
    }

    @Override
    public InputStream decryptStream(InputStream inputStream) throws CryptoException {
      // The encrypter wrote the IV ahead of the ciphertext; read it back first.
      byte[] initVector = new byte[IV_LENGTH_IN_BYTES];
      try {
        IOUtils.readFully(inputStream, initVector);
      } catch (IOException e) {
        throw new CryptoException("Unable to read IV from stream", e);
      }
      try {
        cipher.init(Cipher.DECRYPT_MODE, fek, new IvParameterSpec(initVector));
      } catch (InvalidKeyException | InvalidAlgorithmParameterException e) {
        throw new CryptoException("Unable to initialize cipher", e);
      }
      CipherInputStream cis = new CipherInputStream(inputStream, cipher);
      return new BlockedInputStream(cis, cipher.getBlockSize(), 1024);
    }
  }
}
/**
 * Creates a fresh random AES key of {@code size} bytes using the supplied source of randomness.
 */
public static Key generateKey(SecureRandom random, int size) {
  byte[] keyBytes = new byte[size];
  random.nextBytes(keyBytes);
  return new SecretKeySpec(keyBytes, "AES");
}
/**
 * Unwraps (decrypts) a wrapped file encryption key using the supplied key encryption key.
 *
 * @param fek the wrapped file encryption key bytes
 * @param kek the key encryption key
 * @return the unwrapped AES file encryption key
 */
public static Key unwrapKey(byte[] fek, Key kek) {
  try {
    // Per-thread cipher instance; see KEY_UNWRAP_CIPHER.
    final Cipher c = KEY_UNWRAP_CIPHER.get();
    c.init(Cipher.UNWRAP_MODE, kek);
    return c.unwrap(fek, "AES", Cipher.SECRET_KEY);
  } catch (InvalidKeyException | NoSuchAlgorithmException e) {
    throw new CryptoException("Unable to unwrap file encryption key", e);
  }
}
/**
 * Wraps (encrypts) a file encryption key with the supplied key encryption key so it can be stored
 * safely in a file's decryption parameters.
 *
 * @param fek the file encryption key to wrap
 * @param kek the key encryption key
 * @return the wrapped key bytes
 */
public static byte[] wrapKey(Key fek, Key kek) {
  try {
    // Per-thread cipher instance; see KEY_WRAP_CIPHER.
    final Cipher c = KEY_WRAP_CIPHER.get();
    c.init(Cipher.WRAP_MODE, kek);
    return c.wrap(fek);
  } catch (InvalidKeyException | IllegalBlockSizeException e) {
    throw new CryptoException("Unable to wrap file encryption key", e);
  }
}
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "keyId specified by admin")
public static Key loadKekFromUri(String keyId) {
try {
final java.net.URI uri = new URI(keyId);
return new SecretKeySpec(Files.readAllBytes(Paths.get(uri.getPath())), "AES");
} catch (URISyntaxException | IOException | IllegalArgumentException e) {
throw new CryptoException("Unable to load key encryption key.", e);
}
}
/** Guard used by methods that require {@code init} to have been called first. */
private void ensureInit() {
  if (initialized) {
    return;
  }
  throw new IllegalStateException("This Crypto Service has not been initialized.");
}
/** Guard used by {@code init} to reject a second initialization. */
private void ensureNotInit() {
  if (!initialized) {
    return;
  }
  throw new IllegalStateException("This Crypto Service has already been initialized.");
}
}
| 9,754 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/GenericCryptoServiceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import static org.apache.accumulo.core.conf.Property.GENERAL_ARBITRARY_PROP_PREFIX;
import static org.apache.accumulo.core.conf.Property.TABLE_CRYPTO_PREFIX;
import java.util.Map;
/**
 * Factory that will load a crypto service configured, first checking
 * {@link #GENERAL_SERVICE_NAME_PROP} and then {@link #TABLE_SERVICE_NAME_PROP}. Useful for general
 * purpose on disk encryption, with no Table context.
 */
public class GenericCryptoServiceFactory implements CryptoServiceFactory {
  public static final String GENERAL_SERVICE_NAME_PROP =
      GENERAL_ARBITRARY_PROP_PREFIX + "crypto.service";
  public static final String TABLE_SERVICE_NAME_PROP = TABLE_CRYPTO_PREFIX + "service";

  @Override
  public CryptoService getService(CryptoEnvironment environment, Map<String,String> properties) {
    if (properties == null || properties.isEmpty()) {
      return NoCryptoServiceFactory.NONE;
    }
    // Prefer the general property, falling back to the table-scoped one.
    String serviceName = properties.get(GENERAL_SERVICE_NAME_PROP);
    if (serviceName == null) {
      serviceName = properties.get(TABLE_SERVICE_NAME_PROP);
    }
    if (serviceName == null) {
      // Neither property configured: no encryption.
      return NoCryptoServiceFactory.NONE;
    }
    CryptoService service = newCryptoService(serviceName);
    service.init(properties);
    return service;
  }
}
| 9,755 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/NoFileEncrypter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.OutputStream;
/**
 * A pass-through {@link FileEncrypter} used when encryption is disabled; streams are returned
 * unchanged.
 */
public class NoFileEncrypter implements FileEncrypter {
  @Override
  public OutputStream encryptStream(OutputStream outputStream)
      throws CryptoService.CryptoException {
    // No encryption: hand back the original stream untouched.
    return outputStream;
  }

  @Override
  public byte[] getDecryptionParameters() {
    // Marker telling the reading side that no crypto was applied to this file.
    return NoCryptoService.VERSION.getBytes(UTF_8);
  }
}
| 9,756 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/FileDecrypter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.io.InputStream;
/**
 * Class implementation that will decrypt a file. Make sure implementation is thread safe.
 *
 * @since 2.0
 */
public interface FileDecrypter {
  /**
   * Decrypt the InputStream
   *
   * @param inputStream the encrypted stream to read from
   * @return a stream yielding the decrypted bytes
   * @throws CryptoService.CryptoException if decryption cannot be set up or performed
   */
  InputStream decryptStream(InputStream inputStream) throws CryptoService.CryptoException;
}
| 9,757 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/crypto/NoCryptoService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.crypto;
import java.util.Map;
/**
 * The default encryption strategy which does nothing.
 */
public class NoCryptoService implements CryptoService {
  // Marker written as the "decryption parameters" of unencrypted files.
  public static final String VERSION = "U+1F47B"; // unicode ghost emoji

  @Override
  public void init(Map<String,String> conf) throws CryptoException {
    // do nothing
  }

  @Override
  public FileEncrypter getFileEncrypter(CryptoEnvironment environment) {
    // Pass-through encrypter: returns streams unchanged.
    return new NoFileEncrypter();
  }

  @Override
  public FileDecrypter getFileDecrypter(CryptoEnvironment environment) {
    // Pass-through decrypter: returns streams unchanged.
    return new NoFileDecrypter();
  }
}
| 9,758 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/cache/CacheType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.cache;
/**
 * The kinds of block cache a server maintains; each type is created and sized independently (see
 * {@code BlockCacheManager.Configuration}).
 *
 * @since 2.0.0
 */
public enum CacheType {
  DATA, INDEX, SUMMARY
}
| 9,759 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/cache/CacheEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.cache;
import java.util.function.Supplier;
/**
 * A single cached block, giving access to its bytes and an optional derived index object.
 *
 * @since 2.0.0
 */
public interface CacheEntry {

  /** A cacheable derived object that can report its own weight (size) to the cache. */
  interface Weighable {
    int weight();
  }

  /** Returns the raw bytes of this cached block. */
  byte[] getBuffer();

  /**
   * Optionally cache what is returned by the supplier along with this cache entry. If caching what
   * is returned by the supplier is not supported, its ok to return null.
   *
   * <p>
   * This method exists to support building indexes of frequently accessed cached data.
   */
  <T extends Weighable> T getIndex(Supplier<T> supplier);

  /**
   * The object optionally stored by {@link #getIndex(Supplier)} is a mutable object. Accumulo will
   * call this method whenever the weight of that object changes.
   */
  void indexWeightChanged();
}
| 9,760 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/cache/BlockCache.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.cache;
import java.util.Map;
import org.apache.accumulo.core.file.blockfile.cache.lru.SynchronousLoadingBlockCache;
/**
 * Block cache interface.
 *
 * @since 2.0.0
 * @see org.apache.accumulo.core.spi
 */
public interface BlockCache {

  /**
   * Add block to cache.
   *
   * @param blockName Zero-based file block number.
   * @param buf The block contents wrapped in a ByteBuffer.
   */
  CacheEntry cacheBlock(String blockName, byte[] buf);

  /**
   * Fetch block from cache.
   *
   * @param blockName Block name to fetch.
   * @return Block or null if block is not in the cache.
   */
  CacheEntry getBlock(String blockName);

  /** Supplies a block's bytes (and those of any blocks it depends on) when it is not cached. */
  interface Loader {
    /**
     * The cache blocks that this loader depends on. If a loader has no dependencies, then it should
     * return an empty map. All dependencies must be loaded before calling {@link #load(int, Map)}.
     */
    Map<String,Loader> getDependencies();

    /**
     * Loads a block. Anything returned by {@link #getDependencies()} should be loaded and passed.
     *
     * @param maxSize This is the maximum block size that will be cached.
     * @return The loaded block or null if loading the block would exceed maxSize.
     */
    byte[] load(int maxSize, Map<String,byte[]> dependencies);
  }

  /**
   * This method allows a cache to prevent concurrent loads of the same block. However a cache
   * implementation is not required to prevent concurrent loads.
   * {@link SynchronousLoadingBlockCache} is an abstract class that a cache can extend which does
   * prevent concurrent loading of the same block.
   *
   *
   * @param blockName Block name to fetch
   * @param loader If the block is not present in the cache, the loader can be called to load it.
   * @return Block or null if block is not in the cache or didn't load.
   */
  CacheEntry getBlock(String blockName, Loader loader);

  /**
   * Get the maximum amount of on heap memory this cache will use.
   */
  long getMaxHeapSize();

  /**
   * Get the maximum size of this cache.
   *
   * @return max size in bytes
   */
  long getMaxSize();

  /**
   * Get the statistics of this cache.
   *
   * @return statistics
   */
  Stats getStats();

  /** Cache statistics. */
  interface Stats {
    /**
     * Returns the number of lookups that have returned a cached value.
     *
     * @return the number of lookups that have returned a cached value
     */
    long hitCount();

    /**
     * Returns the number of times the lookup methods have returned either a cached or uncached
     * value.
     *
     * @return the number of lookups
     */
    long requestCount();
  }
}
| 9,761 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/cache/BlockCacheManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.cache;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Map;
/**
 * @since 2.0.0
 * @see org.apache.accumulo.core.spi
 */
public abstract class BlockCacheManager {

  // One cache per CacheType. EnumMap is the idiomatic, compact map implementation for enum keys.
  private final Map<CacheType,BlockCache> caches = new EnumMap<>(CacheType.class);

  /** Configuration supplied to {@link #createCache(Configuration, CacheType)}. */
  public interface Configuration {

    /**
     * Before Accumulo's cache implementation was configurable, its built in caches had a
     * configurable size. These sizes were specified by the system properties
     * {@code tserver.cache.config.data.size}, {@code tserver.cache.config.index.size}, and {@code
     * tserver.cache.config.summary.size}. This method returns the values of those settings. The
     * settings are made available, but cache implementations are under no obligation to use them.
     *
     * <p>
     * When this plugin is running in a scan server, the value associated with
     * {@code sserver.cache.config.data.size}, {@code sserver.cache.config.index.size}, and
     * {@code sserver.cache.config.summary.size} are returned instead of tserver values.
     *
     */
    long getMaxSize(CacheType type);

    /**
     * Before Accumulo's cache implementation was configurable, its built in cache had a
     * configurable block size. This block size was specified by the system property
     * {@code tserver.default.blocksize}. This method returns the value of that setting. The setting
     * is made available, but cache implementations are under no obligation to use it.
     *
     * <p>
     * When this plugin is running in scan server, the value associated with
     * {@code sserver.default.blocksize} is returned instead.
     *
     */
    long getBlockSize();

    /**
     * This method provides a way for a cache implementation to access arbitrary configuration set
     * by a user.
     *
     * <p>
     * When running in a tserver, returns all Accumulo properties that have a prefix of
     * {@code tserver.cache.config.<prefix>.<type>.} or
     * {@code tserver.cache.config.<prefix>.default.} with values for specific cache types
     * overriding defaults.
     *
     * <p>
     * When running in a scan server, returns all Accumulo properties that have a prefix of
     * {@code sserver.cache.config.<prefix>.<type>.} or
     * {@code sserver.cache.config.<prefix>.default.} with values for specific cache types
     * overriding defaults.
     *
     * <p>
     * For example assume the following data is in Accumulo's system config and the plugin is
     * running in a tserver.
     *
     * <pre>
     * tserver.cache.config.lru.default.evictAfter=3600
     * tserver.cache.config.lru.default.loadFactor=.75
     * tserver.cache.config.lru.index.loadFactor=.55
     * tserver.cache.config.lru.data.loadFactor=.65
     * </pre>
     *
     * <p>
     * If this method is called with {@code prefix=lru} and {@code type=INDEX} then it would return
     * a map with the following key values. The load factor setting for index overrides the default
     * value.
     *
     * <pre>
     * evictAfter=3600
     * loadFactor=.55
     * </pre>
     *
     * @param prefix A unique identifier that corresponds to a particular BlockCacheManager
     *        implementation.
     */
    Map<String,String> getProperties(String prefix, CacheType type);
  }

  /**
   * Initialize the caches for each CacheType based on the configuration
   *
   * @param conf accumulo configuration
   */
  public void start(Configuration conf) {
    for (CacheType type : CacheType.values()) {
      BlockCache cache = this.createCache(conf, type);
      this.caches.put(type, cache);
    }
  }

  /**
   * Stop caches and release resources
   */
  public void stop() {
    this.caches.clear();
  }

  /**
   * Get the block cache of the given type
   *
   * @param type block cache type
   * @return BlockCache or null if not enabled
   */
  public BlockCache getBlockCache(CacheType type) {
    return caches.get(type);
  }

  /**
   * Create a block cache using the supplied configuration
   *
   * @param conf cache configuration
   * @return configured block cache
   */
  protected abstract BlockCache createCache(Configuration conf, CacheType type);
}
| 9,762 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/ZStandard.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
/**
 * Compression configuration for the Zstandard algorithm, backed by Hadoop's
 * {@code ZStandardCodec}.
 */
public class ZStandard implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "zstd";
  // Default Hadoop codec implementation class.
  private static final String CODEC_CLASS = "org.apache.hadoop.io.compress.ZStandardCodec";
  // Property that overrides the codec implementation class.
  private static final String CODEC_CLASS_PROPERTY = "io.compression.codec.zstd.class";
  // Property that overrides the compression buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.compression.codec.zstd.buffersize";
  // Default compression buffer size: 64 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    return CODEC_CLASS_PROPERTY;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }

  @Override
  public boolean cacheCodecsWithNonDefaultSizes() {
    return true;
  }
}
| 9,763 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/NoCompression.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
import org.apache.accumulo.core.file.rfile.bcfile.IdentityCodec;
/**
 * Compression configuration for the pass-through "none" algorithm, backed by
 * {@link IdentityCodec} which performs no compression.
 */
public class NoCompression implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "none";
  // Codec class; the identity codec leaves data unchanged.
  private static final String CODEC_CLASS = IdentityCodec.class.getName();
  // Property that overrides the buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.file.buffer.size";
  // Default buffer size: 32 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 32 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    // no property exists to override the identity codec class
    return null;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }
}
| 9,764 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/Snappy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
/**
 * Compression configuration for the Snappy algorithm, backed by Hadoop's {@code SnappyCodec}.
 */
public class Snappy implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "snappy";
  // Default Hadoop codec implementation class.
  private static final String CODEC_CLASS = "org.apache.hadoop.io.compress.SnappyCodec";
  // Property that overrides the codec implementation class.
  private static final String CODEC_CLASS_PROPERTY = "io.compression.codec.snappy.class";
  // Property that overrides the compression buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.compression.codec.snappy.buffersize";
  // Default compression buffer size: 64 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    return CODEC_CLASS_PROPERTY;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }

  @Override
  public boolean cacheCodecsWithNonDefaultSizes() {
    return true;
  }
}
| 9,765 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/Gz.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
/**
 * Compression configuration for the gzip algorithm, backed by Hadoop's {@code DefaultCodec}.
 */
public class Gz implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "gz";
  // Default Hadoop codec implementation class.
  private static final String CODEC_CLASS = "org.apache.hadoop.io.compress.DefaultCodec";
  // Property that overrides the buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.file.buffer.size";
  // Default buffer size: 32 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 32 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    // no property exists to override the gzip codec class
    return null;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }

  @Override
  public boolean cacheCodecsWithNonDefaultSizes() {
    return true;
  }
}
| 9,766 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/Lzo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
/**
 * Compression configuration for the LZO algorithm, backed by Hadoop's {@code LzoCodec}.
 */
public class Lzo implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "lzo";
  // Default Hadoop codec implementation class.
  private static final String CODEC_CLASS = "org.apache.hadoop.io.compress.LzoCodec";
  // Property that overrides the codec implementation class.
  private static final String CODEC_CLASS_PROPERTY = "io.compression.codec.lzo.class";
  // Property that overrides the compression buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.compression.codec.lzo.buffersize";
  // Default compression buffer size: 64 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    return CODEC_CLASS_PROPERTY;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }
}
| 9,767 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/Lz4.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
/**
 * Compression configuration for the LZ4 algorithm, backed by Hadoop's {@code Lz4Codec}.
 */
public class Lz4 implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "lz4";
  // Default Hadoop codec implementation class.
  private static final String CODEC_CLASS = "org.apache.hadoop.io.compress.Lz4Codec";
  // Property that overrides the codec implementation class.
  private static final String CODEC_CLASS_PROPERTY = "io.compression.codec.lz4.class";
  // Property that overrides the compression buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.compression.codec.lz4.buffersize";
  // Default compression buffer size: 256 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 256 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    return CODEC_CLASS_PROPERTY;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }
}
| 9,768 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/CompressionAlgorithmConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
public interface CompressionAlgorithmConfiguration {

  /**
   * @return algorithm alias
   */
  String getName();

  /**
   * @return fully qualified class name of codec
   */
  String getCodecClassName();

  /**
   * @return name of property that can be specified in configuration or in system properties to
   *         override class name of codec, or null if no such property exists
   */
  String getCodecClassNameProperty();

  /**
   * @return default buffer size for compression algorithm
   */
  int getDefaultBufferSize();

  /**
   * @return name of property that can be specified in configuration or in system properties to
   *         override default buffer size
   */
  String getBufferSizeProperty();

  /**
   * @return true if codecs with non-default buffer sizes should be cached
   */
  default boolean cacheCodecsWithNonDefaultSizes() {
    return false;
  }
}
| 9,769 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/file/rfile/compression/Bzip2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.file.rfile.compression;
/**
 * Compression configuration for the bzip2 algorithm, backed by Hadoop's {@code BZip2Codec}.
 */
public class Bzip2 implements CompressionAlgorithmConfiguration {

  // Alias used to select this algorithm in Accumulo configuration.
  private static final String ALIAS = "bzip2";
  // Default Hadoop codec implementation class.
  private static final String CODEC_CLASS = "org.apache.hadoop.io.compress.BZip2Codec";
  // Property that overrides the codec implementation class.
  private static final String CODEC_CLASS_PROPERTY = "io.compression.codec.bzip2.class";
  // Property that overrides the buffer size.
  private static final String BUFFER_SIZE_PROPERTY = "io.file.buffer.size";
  // Default buffer size: 64 KiB.
  private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;

  @Override
  public String getName() {
    return ALIAS;
  }

  @Override
  public String getCodecClassName() {
    return CODEC_CLASS;
  }

  @Override
  public String getCodecClassNameProperty() {
    return CODEC_CLASS_PROPERTY;
  }

  @Override
  public int getDefaultBufferSize() {
    return DEFAULT_BUFFER_SIZE;
  }

  @Override
  public String getBufferSizeProperty() {
    return BUFFER_SIZE_PROPERTY;
  }
}
| 9,770 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionKind.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
/**
* @since 2.1.0
* @see org.apache.accumulo.core.spi.compaction
*/
public enum CompactionKind {
  /**
   * A system initiated routine compaction.
   */
  SYSTEM,
  /**
   * Set of files selected by a {@link CompactionSelector} configured for a table.
   */
  SELECTOR,
  /**
   * A one-time compaction initiated by a user through an Accumulo client.
   */
  USER
}
| 9,771 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import static org.apache.accumulo.core.util.LazySingletons.GSON;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.util.compaction.CompactionJobPrioritizer;
import com.google.common.base.Preconditions;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* Finds the largest continuous set of small files that meet the compaction ratio and do not prevent
* future compactions.
*
* <p>
* The following configuration options are supported. Replace {@code <service>} with the name of the
* compaction service you are configuring.
*
* <ul>
* <li>{@code tserver.compaction.major.service.<service>.opts.executors} This is a json array of
* objects where each object has the fields:
* <table>
* <caption>Default Compaction Planner Executor options</caption>
* <tr>
* <th>Field Name</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>name</td>
* <td>name or alias of the executor (required)</td>
* </tr>
* <tr>
* <td>type</td>
* <td>valid values 'internal' or 'external' (required)</td>
* </tr>
* <tr>
* <td>maxSize</td>
* <td>threshold sum of the input files (required for all but one of the configs)</td>
* </tr>
* <tr>
* <td>numThreads</td>
* <td>number of threads for this executor configuration (required for 'internal', cannot be
* specified for 'external')</td>
* </tr>
* <tr>
* <td>queue</td>
* <td>name of the external compaction queue (required for 'external', cannot be specified for
* 'internal')</td>
* </tr>
* </table>
* <br>
* The maxSize field determines the maximum size of compaction that will run on an executor. The
* maxSize field can have a suffix of K,M,G for kilobytes, megabytes, or gigabytes and represents
* the sum of the input files for a given compaction. One executor can have no max size and it will
* run everything that is too large for the other executors. If all executors have a max size, then
* system compactions will only run for compactions smaller than the largest max size. User, chop,
* and selector compactions will always run, even if there is no executor for their size. These
* compactions will run on the executor with the largest max size. The following example value for
* this property will create 3 threads to run compactions of files whose file size sum is less than
* 100M, 3 threads to run compactions of files whose file size sum is less than 500M, and run all
* other compactions on Compactors configured to run compactions for Queue1:
*
* <pre>
* {@code
* [
* {"name":"small", "type": "internal", "maxSize":"100M","numThreads":3},
* {"name":"medium", "type": "internal", "maxSize":"500M","numThreads":3},
* {"name": "large", "type": "external", "queue", "Queue1"}
* ]}
* </pre>
*
* Note that the use of 'external' requires that the CompactionCoordinator and at least one
* Compactor for Queue1 is running.
* <li>{@code tserver.compaction.major.service.<service>.opts.maxOpen} This determines the maximum
* number of files that will be included in a single compaction.
* </ul>
*
* @since 2.1.0
* @see org.apache.accumulo.core.spi.compaction
*/
public class DefaultCompactionPlanner implements CompactionPlanner {

  // One entry of the 'executors' json option; fields are populated by Gson.
  private static class ExecutorConfig {
    String type;
    String name;
    String maxSize;
    Integer numThreads;
    String queue;
  }

  // Runtime pairing of an executor id with the maximum compaction size it accepts.
  private static class Executor {
    final CompactionExecutorId ceid;
    final Long maxSize; // null means this executor has no size limit

    public Executor(CompactionExecutorId ceid, Long maxSize) {
      Preconditions.checkArgument(maxSize == null || maxSize > 0, "Invalid value for maxSize");
      this.ceid = Objects.requireNonNull(ceid, "Compaction ID is null");
      this.maxSize = maxSize;
    }

    Long getMaxSize() {
      return maxSize;
    }

    @Override
    public String toString() {
      return "[ceid=" + ceid + ", maxSize=" + maxSize + "]";
    }
  }

  // Executors sorted by maxSize ascending; the unbounded executor (if any) is last.
  private List<Executor> executors;
  // Maximum number of files in a single compaction; from the 'maxOpen' option.
  private int maxFilesToCompact;

  @SuppressFBWarnings(value = {"UWF_UNWRITTEN_FIELD", "NP_UNWRITTEN_FIELD"},
      justification = "Field is written by Gson")
  @Override
  public void init(InitParameters params) {
    ExecutorConfig[] execConfigs =
        GSON.get().fromJson(params.getOptions().get("executors"), ExecutorConfig[].class);

    List<Executor> tmpExec = new ArrayList<>();

    for (ExecutorConfig executorConfig : execConfigs) {
      Long maxSize = executorConfig.maxSize == null ? null
          : ConfigurationTypeHelper.getFixedMemoryAsBytes(executorConfig.maxSize);
      CompactionExecutorId ceid;

      // If not supplied, GSON will leave type null. Default to internal
      if (executorConfig.type == null) {
        executorConfig.type = "internal";
      }

      switch (executorConfig.type) {
        case "internal":
          Preconditions.checkArgument(null == executorConfig.queue,
              "'queue' should not be specified for internal compactions");
          int numThreads = Objects.requireNonNull(executorConfig.numThreads,
              "'numThreads' must be specified for internal type");
          ceid = params.getExecutorManager().createExecutor(executorConfig.name, numThreads);
          break;
        case "external":
          Preconditions.checkArgument(null == executorConfig.numThreads,
              "'numThreads' should not be specified for external compactions");
          String queue = Objects.requireNonNull(executorConfig.queue,
              "'queue' must be specified for external type");
          ceid = params.getExecutorManager().getExternalExecutor(queue);
          break;
        default:
          throw new IllegalArgumentException("type must be 'internal' or 'external'");
      }
      tmpExec.add(new Executor(ceid, maxSize));
    }

    // sort by maxSize, smallest first; the executor without a maxSize sorts last
    Collections.sort(tmpExec, Comparator.comparing(Executor::getMaxSize,
        Comparator.nullsLast(Comparator.naturalOrder())));

    executors = List.copyOf(tmpExec);

    if (executors.stream().filter(e -> e.getMaxSize() == null).count() > 1) {
      throw new IllegalArgumentException(
          "Can only have one executor w/o a maxSize. " + params.getOptions().get("executors"));
    }

    // use the add method on the Set interface to check for duplicate maxSizes
    Set<Long> maxSizes = new HashSet<>();
    executors.forEach(e -> {
      if (!maxSizes.add(e.getMaxSize())) {
        throw new IllegalArgumentException(
            "Duplicate maxSize set in executors. " + params.getOptions().get("executors"));
      }
    });

    determineMaxFilesToCompact(params);
  }

  // Reads the 'maxOpen' option, defaulting to 10 files per compaction.
  private void determineMaxFilesToCompact(InitParameters params) {
    this.maxFilesToCompact = Integer.parseInt(params.getOptions().getOrDefault("maxOpen", "10"));
  }

  @Override
  public CompactionPlan makePlan(PlanningParameters params) {
    if (params.getCandidates().isEmpty()) {
      // nothing to compact
      return params.createPlanBuilder().build();
    }

    Set<CompactableFile> filesCopy = new HashSet<>(params.getCandidates());

    long maxSizeToCompact = getMaxSizeToCompact(params.getKind());

    Collection<CompactableFile> group;
    if (params.getRunningCompactions().isEmpty()) {
      group =
          findDataFilesToCompact(filesCopy, params.getRatio(), maxFilesToCompact, maxSizeToCompact);

      if (!group.isEmpty() && group.size() < params.getCandidates().size()
          && params.getCandidates().size() <= maxFilesToCompact
          && (params.getKind() == CompactionKind.USER
              || params.getKind() == CompactionKind.SELECTOR)) {
        // USER and SELECTOR compactions must eventually compact all files. When a subset of files
        // that meets the compaction ratio is selected, look ahead and see if the next compaction
        // would also meet the compaction ratio. If not then compact everything to avoid doing
        // more than logarithmic work across multiple compactions.

        filesCopy.removeAll(group);
        filesCopy.add(getExpected(group, 0));

        if (findDataFilesToCompact(filesCopy, params.getRatio(), maxFilesToCompact,
            maxSizeToCompact).isEmpty()) {
          // The next possible compaction does not meet the compaction ratio, so compact
          // everything.
          group = Set.copyOf(params.getCandidates());
        }
      }

    } else if (params.getKind() == CompactionKind.SYSTEM) {
      // This code determines if once the files compacting finish would they be included in a
      // compaction with the files smaller than them? If so, then wait for the running compaction
      // to complete.

      // The set of files running compactions may produce
      var expectedFiles = getExpected(params.getRunningCompactions());

      if (!Collections.disjoint(filesCopy, expectedFiles)) {
        throw new AssertionError();
      }

      filesCopy.addAll(expectedFiles);

      group =
          findDataFilesToCompact(filesCopy, params.getRatio(), maxFilesToCompact, maxSizeToCompact);

      if (!Collections.disjoint(group, expectedFiles)) {
        // file produced by running compaction will eventually compact with existing files, so
        // wait.
        group = Set.of();
      }
    } else {
      group = Set.of();
    }

    if (group.isEmpty()
        && (params.getKind() == CompactionKind.USER || params.getKind() == CompactionKind.SELECTOR)
        && params.getRunningCompactions().stream()
            .noneMatch(job -> job.getKind() == params.getKind())) {
      group = findMaximalRequiredSetToCompact(params.getCandidates(), maxFilesToCompact);
    }

    if (group.isEmpty()) {
      return params.createPlanBuilder().build();
    } else {
      // determine which executor to use based on the size of the files
      var ceid = getExecutor(group);

      return params.createPlanBuilder().addJob(createPriority(params, group), ceid, group).build();
    }
  }

  // Computes a job priority from the compaction kind and the file counts.
  private static short createPriority(PlanningParameters params,
      Collection<CompactableFile> group) {
    return CompactionJobPrioritizer.createPriority(params.getKind(), params.getAll().size(),
        group.size());
  }

  // For SYSTEM compactions, limits the total input size to the largest executor's maxSize;
  // other kinds have no size limit.
  private long getMaxSizeToCompact(CompactionKind kind) {
    if (kind == CompactionKind.SYSTEM) {
      Long max = executors.get(executors.size() - 1).maxSize;
      if (max != null) {
        return max;
      }
    }
    return Long.MAX_VALUE;
  }

  // Creates a placeholder file whose size is the sum of the given files, representing the
  // output a compaction of those files would produce. The URI is fake but unique per count.
  private CompactableFile getExpected(Collection<CompactableFile> files, int count) {
    long size = files.stream().mapToLong(CompactableFile::getEstimatedSize).sum();
    try {
      return CompactableFile.create(
          new URI("hdfs://fake/accumulo/tables/adef/t-zzFAKEzz/FAKE-0000" + count + ".rf"), size,
          0);
    } catch (URISyntaxException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * @return the expected files sizes for sets of compacting files.
   */
  private Set<CompactableFile> getExpected(Collection<CompactionJob> compacting) {

    Set<CompactableFile> expected = new HashSet<>();

    int count = 0;

    for (CompactionJob job : compacting) {
      count++;
      expected.add(getExpected(job.getFiles(), count));
    }

    return expected;
  }

  // Selects the smallest files to compact when a USER/SELECTOR compaction must make progress
  // even though no set meets the compaction ratio.
  private static Collection<CompactableFile>
      findMaximalRequiredSetToCompact(Collection<CompactableFile> files, int maxFilesToCompact) {

    if (files.size() <= maxFilesToCompact) {
      return files;
    }

    List<CompactableFile> sortedFiles = sortByFileSize(files);

    int numToCompact = maxFilesToCompact;

    if (sortedFiles.size() > maxFilesToCompact && sortedFiles.size() < 2 * maxFilesToCompact) {
      // on the second to last compaction pass, compact the minimum amount of files possible
      numToCompact = sortedFiles.size() - maxFilesToCompact + 1;
    }

    return sortedFiles.subList(0, numToCompact);
  }

  // Finds a set of files meeting the compaction ratio, trimming candidates that would push the
  // total input size past maxSizeToCompact and sliding a window of at most maxFilesToCompact.
  static Collection<CompactableFile> findDataFilesToCompact(Set<CompactableFile> files,
      double ratio, int maxFilesToCompact, long maxSizeToCompact) {
    if (files.size() <= 1) {
      return Collections.emptySet();
    }

    // sort files from smallest to largest. So position 0 has the smallest file.
    List<CompactableFile> sortedFiles = sortByFileSize(files);

    int maxSizeIndex = sortedFiles.size();
    long sum = 0;
    for (int i = 0; i < sortedFiles.size(); i++) {
      sum += sortedFiles.get(i).getEstimatedSize();
      if (sum > maxSizeToCompact) {
        maxSizeIndex = i;
        break;
      }
    }

    if (maxSizeIndex < sortedFiles.size()) {
      sortedFiles = sortedFiles.subList(0, maxSizeIndex);
      if (sortedFiles.size() <= 1) {
        return Collections.emptySet();
      }
    }

    var loops = Math.max(1, sortedFiles.size() - maxFilesToCompact + 1);
    for (int i = 0; i < loops; i++) {
      var filesToCompact = findDataFilesToCompact(
          sortedFiles.subList(i, Math.min(sortedFiles.size(), maxFilesToCompact) + i), ratio);
      if (!filesToCompact.isEmpty()) {
        return filesToCompact;
      }
    }

    return Collections.emptySet();
  }

  /**
   * Find the largest set of contiguous small files that meet the compaction ratio. For a set of
   * file size like [101M,102M,103M,104M,4M,3M,3M,3M,3M], it would be nice compact the smaller files
   * [4M,3M,3M,3M,3M] followed by the larger ones. The reason to do the smaller ones first is to
   * more quickly reduce the number of files. However, all compactions should still follow the
   * compaction ratio in order to ensure the amount of data rewriting is logarithmic.
   *
   * <p>
   * A set of files meets the compaction ratio when
   * {@code largestFileInSet * compactionRatio < sumOfFileSizesInSet}. This algorithm grows the set
   * of small files until it meets the compaction
   * ratio, then keeps growing it while it continues to meet the ratio. Once a set does not meet the
   * compaction ratio, the last set that did is returned. Growing the set of small files means
   * adding the smallest file not in the set.
   *
   * <p>
   * There is one caveat to the algorithm mentioned above, if a smaller set of files would prevent a
   * future compaction then do not select it. This code in this function performs a look ahead to
   * see if a candidate set will prevent future compactions.
   *
   * <p>
   * As an example of a small set of files that could prevent a future compaction, consider the
   * files sizes [100M,99M,33M,33M,33M,33M]. For a compaction ratio of 3, the set
   * [100M,99M,33M,33M,33M,33M] and [33M,33M,33M,33M] both meet the compaction ratio. If the set
   * [33M,33M,33M,33M] is compacted, then it will result in a tablet having [132M, 100M, 99M] which
   * does not meet the compaction ratio. So in this case, choosing the set [33M,33M,33M,33M]
   * prevents a future compaction that could have occurred. This function will not choose the
   * smaller set because it would prevent the future compaction.
   */
  private static Collection<CompactableFile>
      findDataFilesToCompact(List<CompactableFile> sortedFiles, double ratio) {

    int larsmaIndex = -1;
    long larsmaSum = Long.MIN_VALUE;

    // index into sortedFiles, everything at and below this index meets the compaction ratio
    int goodIndex = -1;

    long sum = sortedFiles.get(0).getEstimatedSize();

    for (int c = 1; c < sortedFiles.size(); c++) {
      long currSize = sortedFiles.get(c).getEstimatedSize();

      // ensure data is sorted
      Preconditions.checkArgument(currSize >= sortedFiles.get(c - 1).getEstimatedSize());

      sum += currSize;

      if (currSize * ratio < sum) {
        goodIndex = c;
      } else if (c - 1 == goodIndex) {
        // The previous file met the compaction ratio, but the current file does not. So all of the
        // previous files are candidates. However we must ensure that any candidate set produces a
        // file smaller than the next largest file in the next candidate set to ensure future
        // compactions are not prevented.
        if (larsmaIndex == -1 || larsmaSum > sortedFiles.get(goodIndex).getEstimatedSize()) {
          larsmaIndex = goodIndex;
          larsmaSum = sum - currSize;
        } else {
          break;
        }
      }
    }

    if (sortedFiles.size() - 1 == goodIndex
        && (larsmaIndex == -1 || larsmaSum > sortedFiles.get(goodIndex).getEstimatedSize())) {
      larsmaIndex = goodIndex;
    }

    if (larsmaIndex == -1) {
      return Collections.emptySet();
    }

    return sortedFiles.subList(0, larsmaIndex + 1);
  }

  // Chooses the first executor whose maxSize can accommodate the total input size, falling back
  // to the largest (last) executor.
  CompactionExecutorId getExecutor(Collection<CompactableFile> files) {

    long size = files.stream().mapToLong(CompactableFile::getEstimatedSize).sum();

    for (Executor executor : executors) {
      if (executor.maxSize == null || size < executor.maxSize) {
        return executor.ceid;
      }
    }

    return executors.get(executors.size() - 1).ceid;
  }

  private static List<CompactableFile> sortByFileSize(Collection<CompactableFile> files) {
    ArrayList<CompactableFile> sortedFiles = new ArrayList<>(files);

    // sort from smallest file to largest
    Collections.sort(sortedFiles, Comparator.comparingLong(CompactableFile::getEstimatedSize)
        .thenComparing(CompactableFile::getUri));

    return sortedFiles;
  }
}
| 9,772 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServices.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.Set;
/**
* Provider of information about configured compaction services.
*
* @since 2.1.0
*/
public interface CompactionServices {
  /**
   * @return the ids of all currently configured compaction services
   */
  Set<CompactionServiceId> getIds();
}
| 9,773 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.Map;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import com.google.common.base.Preconditions;
/**
* Can be configured per table to dispatch compactions to different compaction services. For a given
* table the dispatcher can choose a different compaction service for each kind of compaction. For
* example, user and chop compactions could be dispatched to service_A while system compactions are
* dispatched to service_B.
*
* @since 2.1.0
* @see org.apache.accumulo.core.spi.compaction
*/
public interface CompactionDispatcher {
  /**
   * The method parameters for {@link CompactionDispatcher#init(InitParameters)}. This interface
   * exists so the API can evolve and additional parameters can be passed to the method in the
   * future.
   *
   * @since 2.1.0
   */
  public interface InitParameters {
    /**
     *
     * @return The configured options. For example if the table properties
     *         {@code table.compaction.dispatcher.opts.p1=abc} and
     *         {@code table.compaction.dispatcher.opts.p9=123} were set, then this map would contain
     *         {@code p1=abc} and {@code p9=123}.
     */
    Map<String,String> getOptions();

    /**
     * @return the id of the table this dispatcher was configured on
     */
    TableId getTableId();

    /**
     * @return the service environment, see {@link ServiceEnvironment}
     */
    ServiceEnvironment getServiceEnv();
  }

  /**
   * This method is called once after a CompactionDispatcher is instantiated.
   */
  default void init(InitParameters params) {
    Preconditions.checkArgument(params.getOptions().isEmpty(), "No options expected");
  }

  /**
   * The method parameters for {@link CompactionDispatcher#dispatch(DispatchParameters)}. This
   * interface exists so the API can evolve and additional parameters can be passed to the method in
   * the future.
   *
   * @since 2.1.0
   */
  public interface DispatchParameters {
    /**
     * @return the currently configured compaction services
     */
    CompactionServices getCompactionServices();

    /**
     * @return the service environment, see {@link ServiceEnvironment}
     */
    ServiceEnvironment getServiceEnv();

    /**
     * @return the kind of compaction being dispatched
     */
    CompactionKind getCompactionKind();

    /**
     * @return any execution hints set when the compaction was initiated via the client API; may be
     *         empty
     */
    Map<String,String> getExecutionHints();
  }

  /**
   * Accumulo calls this method for compactions to determine what service to use.
   */
  CompactionDispatch dispatch(DispatchParameters params);
}
| 9,774 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
/**
* The dispatch of a {@link CompactionDispatcher}
*
* @since 2.1.0
* @see org.apache.accumulo.core.spi.compaction
*/
public interface CompactionDispatch {
  /**
   * @return The service where a compaction should run.
   */
  CompactionServiceId getService();

  /**
   * Required service parameter for {@link CompactionDispatch}
   *
   * @since 2.1.0
   */
  interface ServiceBuilder {
    /**
     * @param service the compaction service to dispatch to
     * @return a builder that can finalize the dispatch
     */
    Builder toService(CompactionServiceId service);

    /**
     * @param compactionServiceId the name of the compaction service to dispatch to
     * @return a builder that can finalize the dispatch
     */
    Builder toService(String compactionServiceId);
  }

  /**
   * @since 2.1.0
   */
  interface Builder {
    /**
     * @return the CompactionDispatch that was built
     */
    CompactionDispatch build();
  }

  /**
   * @return a fluent builder for creating a {@link CompactionDispatch}
   */
  static ServiceBuilder builder() {
    return new CompactionDispatchBuilder();
  }
}
| 9,775 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/SimpleCompactionDispatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

import org.apache.accumulo.core.client.admin.CompactionConfig;
/**
* Dispatcher that supports simple configuration for making tables use compaction services. By
* default it dispatches to a compaction service named default.
*
* <p>
* The following schema is supported for configuration options.
*
* <p>
* {@code table.compaction.dispatcher.opts.service[.user[.<user type>]|selected|system|chop]=
* <service>}
*
* <p>
* The following configuration will make a table use compaction service cs9 for user compactions,
* service cs4 for chop compactions, and service cs7 for everything else.
*
* <p>
* {@code
* table.compaction.dispatcher.opts.service=cs7
* table.compaction.dispatcher.opts.service.user=cs9
* table.compaction.dispatcher.opts.service.chop=cs4
* }
*
* <p>
* Compactions started using the client API are called user compactions and can set execution hints
* using {@link CompactionConfig#setExecutionHints(Map)}. Hints of the form
* {@code compaction_type=<user type>} can be used by this dispatcher. For example the following
* will use service cs2 when the hint {@code compaction_type=urgent} is seen, service cs3 when hint
* {@code compaction_type=trifling}, everything else uses cs9.
*
* <p>
* {@code
* table.compaction.dispatcher.opts.service=cs9
* table.compaction.dispatcher.opts.service.user.urgent=cs2
* table.compaction.dispatcher.opts.service.user.trifling=cs3
* }
*
* @see org.apache.accumulo.core.spi.compaction
*/
public class SimpleCompactionDispatcher implements CompactionDispatcher {

  // Dispatch target for each compaction kind, resolved once in init().
  private Map<CompactionKind,CompactionDispatch> services;
  // Dispatch targets for user compactions, keyed by the "compaction_type" hint value.
  private Map<String,CompactionDispatch> userServices;

  @Override
  public void init(InitParameters params) {
    services = new EnumMap<>(CompactionKind.class);

    var defaultService = CompactionDispatch.builder().toService("default").build();

    if (params.getOptions().containsKey("service")) {
      defaultService =
          CompactionDispatch.builder().toService(params.getOptions().get("service")).build();
    }

    for (CompactionKind ctype : CompactionKind.values()) {
      // Use Locale.ROOT so building the option key is locale-independent; the default
      // toLowerCase() is sensitive to the JVM's locale (e.g. Turkish dotless-i casing).
      String service = params.getOptions().get("service." + ctype.name().toLowerCase(Locale.ROOT));
      if (service == null) {
        services.put(ctype, defaultService);
      } else {
        services.put(ctype, CompactionDispatch.builder().toService(service).build());
      }
    }

    if (params.getOptions().isEmpty()) {
      userServices = Map.of();
    } else {
      Map<String,CompactionDispatch> tmpUS = new HashMap<>();
      params.getOptions().forEach((k, v) -> {
        if (k.startsWith("service.user.")) {
          String type = k.substring("service.user.".length());
          tmpUS.put(type, CompactionDispatch.builder().toService(v).build());
        }
      });
      userServices = Map.copyOf(tmpUS);
    }
  }

  @Override
  public CompactionDispatch dispatch(DispatchParameters params) {
    if (params.getCompactionKind() == CompactionKind.USER) {
      // User compactions may target a specific service via the "compaction_type" hint;
      // fall through to the per-kind mapping when no matching hint service is configured.
      String hintType = params.getExecutionHints().get("compaction_type");
      if (hintType != null) {
        var userDispatch = userServices.get(hintType);
        if (userDispatch != null) {
          return userDispatch;
        }
      }
    }
    return services.get(params.getCompactionKind());
  }
}
| 9,776 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatchBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.Objects;
/**
* This class intentionally package private.
*/
class CompactionDispatchBuilder
    implements CompactionDispatch.Builder, CompactionDispatch.ServiceBuilder {

  // The single piece of state; set by one of the toService overloads before build().
  private CompactionServiceId serviceId;

  @Override
  public CompactionDispatch.Builder toService(CompactionServiceId service) {
    this.serviceId = Objects.requireNonNull(service, "CompactionServiceId cannot be null");
    return this;
  }

  @Override
  public CompactionDispatch.Builder toService(String compactionServiceId) {
    // CompactionServiceId.of performs validation of the name.
    this.serviceId = CompactionServiceId.of(compactionServiceId);
    return this;
  }

  @Override
  public CompactionDispatch build() {
    return new CompactionDispatchImpl(serviceId);
  }
}
| 9,777 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlan.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.Collection;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.spi.compaction.CompactionPlanner.PlanningParameters;
/**
* The return value of {@link CompactionPlanner#makePlan(PlanningParameters)} that is created using
* {@link PlanningParameters#createPlanBuilder()}
*
* @since 2.1.0
* @see CompactionPlanner
* @see org.apache.accumulo.core.spi.compaction
*/
public interface CompactionPlan {
  /**
   * @since 2.1.0
   * @see PlanningParameters#createPlanBuilder()
   */
  interface Builder {
    /**
     * @param priority This determines the order in which the job is taken off the execution queue.
     *        Larger numbers are taken off the queue first. If two jobs are on the queue, one with a
     *        priority of 4 and another with 5, then the one with 5 will be taken first.
     * @param executor Where the job should run.
     * @param group The files to compact.
     * @return this
     */
    Builder addJob(short priority, CompactionExecutorId executor,
        Collection<CompactableFile> group);

    /**
     * @return a compaction plan containing all jobs added via
     *         {@link #addJob(short, CompactionExecutorId, Collection)}
     */
    CompactionPlan build();
  }

  /**
   * Return the set of jobs this plan will submit for compaction.
   */
  Collection<CompactionJob> getJobs();
}
| 9,778 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionExecutorId.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import org.apache.accumulo.core.data.AbstractId;
/**
 * A unique identifier for a compaction executor that a {@link CompactionPlanner} can schedule
 * compactions on using a {@link CompactionJob}.
 *
 * @since 2.1.0
 * @see org.apache.accumulo.core.spi.compaction
 */
public class CompactionExecutorId extends AbstractId<CompactionExecutorId> {
  private static final long serialVersionUID = 1L;

  protected CompactionExecutorId(String canonical) {
    super(canonical);
  }
}
| 9,779 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.Set;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
/**
* An immutable object that describes what files to compact and where to compact them.
*
* @since 2.1.0
* @see org.apache.accumulo.core.spi.compaction
*/
public interface CompactionJob {

  /**
   * @return The priority of this job. Larger numbers are taken off the execution queue first.
   */
  short getPriority();

  /**
   * @return The executor to run the job.
   */
  CompactionExecutorId getExecutor();

  /**
   * @return The files to compact
   */
  Set<CompactableFile> getFiles();

  /**
   * @return The kind of compaction this is.
   */
  CompactionKind getKind();
}
| 9,780 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/ExecutorManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
/**
* Offered to a Compaction Planner at initialization time so it can create executors.
*
*
* @since 2.1.0
* @see CompactionPlanner#init(org.apache.accumulo.core.spi.compaction.CompactionPlanner.InitParameters)
* @see org.apache.accumulo.core.spi.compaction
*/
public interface ExecutorManager {
  /**
   * Create a thread pool executor within a compaction service.
   *
   * @param name a name identifying the executor within the compaction service
   * @param threads the number of threads for the executor's thread pool
   * @return an id that can be used in a {@link CompactionJob} to target the new executor
   */
  public CompactionExecutorId createExecutor(String name, int threads);

  /**
   * @return an id for a configured external execution queue.
   */
  public CompactionExecutorId getExternalExecutor(String name);
}
| 9,781 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionDispatchImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
/**
* This class intentionally package private. It is immutable and provides default allocation for
* {@link CompactionDispatch}.
*/
class CompactionDispatchImpl implements CompactionDispatch {

  private final CompactionServiceId serviceId;

  // Created by CompactionDispatchBuilder, which validates the service id.
  public CompactionDispatchImpl(CompactionServiceId service) {
    this.serviceId = service;
  }

  @Override
  public CompactionServiceId getService() {
    return serviceId;
  }

  @Override
  public String toString() {
    return "service=" + serviceId;
  }
}
| 9,782 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionServiceId.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import org.apache.accumulo.core.data.AbstractId;
/**
* A unique identifier for a compaction service
*
* @since 2.1.0
*/
public class CompactionServiceId extends AbstractId<CompactionServiceId> {
  private static final long serialVersionUID = 1L;

  // Instances are created via the of(String) static factory.
  private CompactionServiceId(String canonical) {
    super(canonical);
  }

  /**
   * @param canonical the canonical name of the compaction service
   * @return an id wrapping the given canonical name
   */
  public static CompactionServiceId of(String canonical) {
    return new CompactionServiceId(canonical);
  }
}
| 9,783 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* This package provides a place for plugin interfaces related to executing compactions. The diagram
* below shows the functional components in Accumulo related to compactions. Not all of these
* components are pluggable, but understanding how everything fits together is important for writing
* a plugin.
*
* <p>
* <img src="doc-files/compaction-spi-design.png" alt="Compaction design diagram">
*
* <p>
* The following is a description of each functional component.
*
* <ul>
* <li><b>Compaction Manager</b> A non pluggable component within the tablet server that brings all
* other components together. The manager will route compactables to compaction services. For each
* kind of compaction, an individual compactable will be routed to a single compaction service. For
 * example it is possible that compactable C1 is routed to service S1 for user compactions and service
* S2 for system compactions.
* <ul>
* <li><b>Compaction Service</b> A non pluggable component that compacts tablets. One or more of
* these are created based on user configuration. Users can assign a table to a compaction service.
 * Has a single compaction planner and one or more compaction executors.
* <ul>
* <li><b>Compaction Executor</b> A non pluggable component that executes compactions using multiple
* threads and has a priority queue.
* <li><b>Compaction Planner</b> A pluggable component that can be configured by users when they
* configure a compaction service. It makes decisions about which files to compact on which
* executors. See {@link org.apache.accumulo.core.spi.compaction.CompactionPlanner},
* {@link org.apache.accumulo.core.spi.compaction.CompactionPlanner#makePlan(org.apache.accumulo.core.spi.compaction.CompactionPlanner.PlanningParameters)},
* and {@link org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner}
* </ul>
* </ul>
* <ul>
* <li><b>Compactable</b> A non pluggable component that wraps a Tablet and per table pluggable
* compaction components. It tracks all information about one or more running compactions that is
 * needed by a compaction service in a thread-safe manner. There is a 1 to 1 relationship between
* compactables and tablets.
* <ul>
* <li><b>Compaction Configurer</b> A pluggable component that can optionally be configured per
* table to dynamically configure file output settings. This supports use cases like using snappy
* for small files and gzip for large files. See
* {@link org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer}
 * <li><b>Compaction Dispatcher</b> A pluggable component that decides which compaction
* service a table should use for different kinds of compactions. This is configurable by users per
* table. See {@link org.apache.accumulo.core.spi.compaction.CompactionDispatcher}
* <li><b>Compaction Selector</b> A pluggable component that can optionally be configured per table
* to periodically select files to compact. This supports use cases like periodically compacting all
* files because there are too many deletes. See
* {@link org.apache.accumulo.core.client.admin.compaction.CompactionSelector}
* </ul>
* </ul>
* </ul>
*
* @see org.apache.accumulo.core.spi
*/
package org.apache.accumulo.core.spi.compaction;
| 9,784 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/compaction/CompactionPlanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
/**
* Plans compaction work for a compaction service.
*
* @since 2.1.0
* @see org.apache.accumulo.core.spi.compaction
*/
public interface CompactionPlanner {
  /**
   * This interface exists so the API can evolve and additional parameters can be passed to the
   * method in the future.
   *
   * @since 2.1.0
   */
  public interface InitParameters {
    /**
     * @return the service environment, see {@link ServiceEnvironment}
     */
    ServiceEnvironment getServiceEnvironment();

    /**
     * @return The configured options. For example if the system properties
     *         {@code tserver.compaction.major.service.s1.planner.opts.p1=abc} and
     *         {@code tserver.compaction.major.service.s1.planner.opts.p9=123} were set, then this
     *         map would contain {@code p1=abc} and {@code p9=123}. In this example {@code s1} is
     *         the identifier for the compaction service. Each compaction service has a single
     *         planner.
     */
    Map<String,String> getOptions();

    /**
     * @return For a given key from the map returned by {@link #getOptions()} determines the fully
     *         qualified tablet property for that key. For example if a planner was being
     *         initialized for compaction service {@code CS9} and this method were passed
     *         {@code prop1} then it would return
     *         {@code tserver.compaction.major.service.CS9.planner.opts.prop1}.
     */
    String getFullyQualifiedOption(String key);

    /**
     * @return an execution manager that can be used to create thread pools within a compaction
     *         service.
     */
    ExecutorManager getExecutorManager();
  }

  /**
   * Called to initialize the planner; the options configured for the compaction service are
   * available via {@link InitParameters#getOptions()}.
   */
  public void init(InitParameters params);

  /**
   * This interface exists so the API can evolve and additional parameters can be passed to the
   * method in the future.
   *
   * @since 2.1.0
   */
  public interface PlanningParameters {
    /**
     * @return The id of the table that compactions are being planned for.
     * @see ServiceEnvironment#getTableName(TableId)
     */
    TableId getTableId();

    /**
     * @return the service environment, see {@link ServiceEnvironment}
     */
    ServiceEnvironment getServiceEnvironment();

    /**
     * @return the kind of compaction being planned
     */
    CompactionKind getKind();

    /**
     * @return the compaction ratio configured for the table
     */
    double getRatio();

    /**
     * @return the set of all files a tablet has.
     */
    Collection<CompactableFile> getAll();

    /**
     * @return the set of files that could be compacted depending on what {@link #getKind()}
     *         returns.
     */
    Collection<CompactableFile> getCandidates();

    /**
     * @return jobs that are currently running
     */
    Collection<CompactionJob> getRunningCompactions();

    /**
     * @return For a user compaction (when {@link #getKind()} returns {@link CompactionKind#USER})
     *         where the user set execution hints via
     *         {@link CompactionConfig#setExecutionHints(Map)} this will return those hints.
     *         Otherwise this will return an immutable empty map.
     */
    Map<String,String> getExecutionHints();

    /**
     * @return A compaction plan builder that must be used to create a compaction plan.
     */
    CompactionPlan.Builder createPlanBuilder();
  }

  /**
   * <p>
   * Plan what work a compaction service should do. The kind of compaction returned by
   * {@link PlanningParameters#getKind()} determines what must be done with the files returned by
   * {@link PlanningParameters#getCandidates()}. The following are the expectations for the
   * candidates for each kind.
   *
   * <ul>
   * <li>CompactionKind.SYSTEM The planner is not required to do anything with the candidates and
   * can choose to compact zero or more of them. The candidates may represent a subset of all the
   * files in the case where a user compaction is in progress or other compactions are running.
   * <li>CompactionKind.USER and CompactionKind.SELECTED. The planner is required to eventually
   * compact all candidates. It is OK to return a compaction plan that compacts a subset. When the
   * planner compacts a subset, it will eventually be called again later. When it is called later
   * the candidates will contain the files it did not compact and the results of any previous
   * compactions it scheduled. The planner must eventually compact all of the files in the candidate
   * set down to a single file. The compaction service will keep calling the planner until it does.
   * </ul>
   *
   * <p>
   * For a user or selector compaction, assume the same sequence of events happens; it will result
   * in a slightly different outcome.
   * <ol>
   * <li>The candidate set passed to makePlan contains the files {@code [F1,F2,F3,F4]} and kind is
   * USER
   * <li>The planner returns a job to compact files {@code [F1,F2]} on executor E1
   * <li>The compaction runs compacting {@code [F1,F2]} into file {@code [F5]}
   * </ol>
   *
   * <p>
   * For the case above, eventually the planner will be called again with a candidate set of
   * {@code [F3,F4,F5]} and it must eventually compact those three files to one.
   *
   * <p>
   * When a planner returns a compaction plan, tasks will be queued on executors. Previously queued
   * tasks that do not match the latest plan are removed. The planner is called periodically,
   * whenever a new file is added, and whenever a compaction finishes.
   *
   * <p>
   * Use {@link PlanningParameters#createPlanBuilder()} to build the plan this function returns.
   *
   * @see CompactionKind
   */
  CompactionPlan makePlan(PlanningParameters params);
}
| 9,785 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Collection;
import java.util.Map;
import java.util.OptionalLong;
import java.util.Set;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.IteratorConfiguration;
import org.apache.accumulo.core.spi.common.Stats;
/**
* Provides information about an active Accumulo scan against a tablet. Accumulo scans operate by
* repeatedly gathering batches of data and returning those to the client.
*
* <p>
* All times are in milliseconds and obtained using System.currentTimeMillis().
*
* @since 2.0.0
*/
public interface ScanInfo {

  /**
   * Distinguishes which kind of client-side scanner initiated this scan.
   */
  enum Type {
    /**
     * A single range scan started using a {@link Scanner}
     */
    SINGLE,
    /**
     * A multi range scan started using a {@link BatchScanner}
     */
    MULTI
  }

  /**
   * @return whether this scan was started by a {@link Scanner} or a {@link BatchScanner}
   */
  Type getScanType();

  /**
   * @return the id of the table being scanned
   */
  TableId getTableId();

  /**
   * Returns the first time a tablet knew about a scan over its portion of data. This is the time a
   * scan session was created inside a tablet server. If the scan goes across multiple tablet
   * servers then within each tablet server there will be a different creation time.
   */
  long getCreationTime();

  /**
   * If the scan has run, returns the last run time.
   */
  OptionalLong getLastRunTime();

  /**
   * Returns timing statistics about running and gathering batches of data.
   */
  Stats getRunTimeStats();

  /**
   * Returns statistics about the time between running. These stats are only about the idle times
   * before the last run time. The idle time after the last run time are not included. If the scan
   * has never run, then there are no stats.
   */
  Stats getIdleTimeStats();

  /**
   * This method is similar to {@link #getIdleTimeStats()}, but it also includes the time period
   * between the last run time and now in the stats. If the scan has never run, then the stats are
   * computed using only {@code currentTime - creationTime}.
   */
  Stats getIdleTimeStats(long currentTime);

  /**
   * This method returns what columns were fetched by a scan. When a family is fetched, a Column
   * object where everything but the family is null is in the set.
   *
   * <p>
   * The following example code shows how this method can be used to check if a family was fetched
   * or a family+qualifier was fetched. If continually checking for the same column, should probably
   * create a constant.
   *
   * <pre>
   * <code>
   *   boolean wasFamilyFetched(ScanInfo si, byte[] fam) {
   *     Column family = new Column(fam, null, null);
   *     return si.getFetchedColumns().contains(family);
   *   }
   *
   *   boolean wasColumnFetched(ScanInfo si, byte[] fam, byte[] qual) {
   *     Column col = new Column(fam, qual, null);
   *     return si.getFetchedColumns().contains(col);
   *   }
   * </code>
   * </pre>
   *
   *
   * @return The family and family+qualifier pairs fetched.
   */
  Set<Column> getFetchedColumns();

  /**
   * @return iterators that were configured on the client side scanner
   */
  Collection<IteratorConfiguration> getClientScanIterators();

  /**
   * @return Hints set by a scanner using {@link ScannerBase#setExecutionHints(Map)}
   */
  Map<String,String> getExecutionHints();
}
| 9,786 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/HintScanPrioritizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import static java.util.stream.Collectors.toUnmodifiableMap;

import java.util.Comparator;
import java.util.Locale;
import java.util.Map;

import org.apache.accumulo.core.client.ScannerBase;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* When configured for a scan executor, this prioritizer allows scanners to set priorities as
* integers. Lower integers result in higher priority.
*
* <p>
* Scanners can put the key/values {@code priority=<integer>} and/or {@code scan_type=<type>} in the
* map passed to {@link ScannerBase#setExecutionHints(Map)} to set the priority. When a
* {@code priority} hint is set it takes precedence and the value is used as the priority. When a
* {@code scan_type} hint is set the priority is looked up using the value.
*
* <p>
* This prioritizer accepts the option {@code default_priority=<integer>} which determines what
* priority to use for scans without a hint. If not set, then {@code default_priority} is
* {@link Integer#MAX_VALUE}.
*
* <p>
* This prioritizer accepts the option {@code bad_hint_action=fail|log|none}. This option determines
* what happens when a priority hint is not an integer. It defaults to {@code log} which logs a
* warning. The {@code fail} option throws an exception which may fail the scan. The {@code none}
* option silently ignores invalid hints.
*
* <p>
* This prioritizer accepts the option {@code priority.<type>=<integer>} which maps a scan type hint
* to a priority.
*
* <p>
* When two scans have the same priority, the scan is prioritized based on last run time and then
* creation time.
*
* <p>
 * If a secondary or tertiary priority is needed, this can be done with bit shifting. For example
 * assume a primary priority of 1 to 3 is desired followed by a secondary priority of 1 to 10. This
 * can be encoded as {@code int priority = primary << 4 | secondary}. When the primary bits are
 * equal the comparison naturally falls back to the secondary bits. The example does not handle the
 * case where the primary or secondary priorities are outside expected ranges.
*
* @since 2.0.0
*/
public class HintScanPrioritizer implements ScanPrioritizer {

  private static final Logger log = LoggerFactory.getLogger(HintScanPrioritizer.class);

  // Prefix for options that map a scan type hint value to a priority, e.g. priority.slow=10.
  // Was a non-static instance field; a shared constant belongs in static final.
  private static final String PRIO_PREFIX = "priority.";

  // What to do when the "priority" hint is present but is not parseable as an integer.
  private enum HintProblemAction {
    NONE, LOG, FAIL
  }

  /**
   * Determines the priority for a single scan. An explicit {@code priority} hint wins; otherwise a
   * {@code scan_type} hint is looked up in the configured type priorities; otherwise the default
   * priority is used.
   *
   * @param si the scan whose execution hints are consulted
   * @param defaultPriority priority used when no applicable hint is present
   * @param hpa what to do when the priority hint is malformed
   * @param typePriorities mapping from scan type hint value to priority
   * @return the priority for the scan, lower meaning higher priority
   */
  private static int getPriority(ScanInfo si, int defaultPriority, HintProblemAction hpa,
      Map<String,Integer> typePriorities) {
    String prio = si.getExecutionHints().get("priority");
    if (prio != null) {
      try {
        return Integer.parseInt(prio);
      } catch (NumberFormatException nfe) {
        switch (hpa) {
          case FAIL:
            throw nfe;
          case LOG:
            log.warn("Unable to parse priority hint {}, falling back to default {}.", prio,
                defaultPriority);
            break;
          case NONE:
            break;
          default:
            throw new IllegalStateException("Unhandled bad hint action : " + hpa);
        }
      }
    }
    if (!typePriorities.isEmpty()) {
      String scanType = si.getExecutionHints().get("scan_type");
      if (scanType != null) {
        Integer typePrio = typePriorities.get(scanType);
        if (typePrio != null) {
          return typePrio;
        }
      }
    }
    return defaultPriority;
  }

  @Override
  public Comparator<ScanInfo> createComparator(CreateParameters params) {
    int defaultPriority = Integer
        .parseInt(params.getOptions().getOrDefault("default_priority", Integer.MAX_VALUE + ""));
    // Build the scan-type -> priority map from options of the form priority.<type>=<integer>.
    Map<String,Integer> typePriorities =
        params.getOptions().entrySet().stream().filter(e -> e.getKey().startsWith(PRIO_PREFIX))
            .collect(toUnmodifiableMap(e -> e.getKey().substring(PRIO_PREFIX.length()),
                e -> Integer.parseInt(e.getValue())));
    // Use a locale-independent upper casing; in some locales (e.g. Turkish) the default
    // toUpperCase() maps 'i' to a dotted capital I, which would break valueOf("fail").
    HintProblemAction hpa = HintProblemAction.valueOf(params.getOptions()
        .getOrDefault("bad_hint_action", HintProblemAction.LOG.name()).toUpperCase(Locale.ROOT));
    // Ties on priority are broken by least-recently-run scans, then by oldest creation time.
    Comparator<ScanInfo> cmp =
        Comparator.comparingInt(si -> getPriority(si, defaultPriority, hpa, typePriorities));
    return cmp.thenComparingLong(si -> si.getLastRunTime().orElse(0))
        .thenComparingLong(ScanInfo::getCreationTime);
  }
}
| 9,787 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanServerSelections.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.time.Duration;
import org.apache.accumulo.core.data.TabletId;
/**
* Returned by {@link ScanServerSelector#selectServers(ScanServerSelector.SelectorParameters)} to
* specify what scan servers to use and how to use them.
*
* @since 2.1.0
*/
public interface ScanServerSelections {

  /**
   * @return what scan server to use for a given tablet. Returning null indicates the tablet server
   *         should be used for this tablet.
   */
  String getScanServer(TabletId tabletId);

  /**
   * @return The amount of time to wait on the client side before starting to contact servers.
   *         Return {@link Duration#ZERO} if no client side wait is desired.
   */
  Duration getDelay();

  /**
   * @return The amount of time to wait for a scan to start on the server side before reporting
   *         busy. For example if a scan request is sent to scan server with a busy timeout of 50ms
   *         and the scan has not started running within that time then the scan server will not
   *         ever run the scan and it will report back busy. If the scan starts running, then it
   *         will never report back busy. Setting a busy timeout that is {@code <= 0} means that it
   *         will wait indefinitely on the server side for the task to start.
   */
  Duration getBusyTimeout();
}
| 9,788 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ConfigurableScanServerSelector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.LazySingletons.GSON;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.lang.reflect.Type;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.data.TabletId;
import com.google.common.base.Preconditions;
import com.google.common.base.Suppliers;
import com.google.common.collect.Sets;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.gson.reflect.TypeToken;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* The default Accumulo selector for scan servers. This dispatcher will :
*
* <ul>
* <li>Hash each tablet to a per attempt configurable number of scan servers and then randomly
* choose one of those scan servers. Using hashing allows different client to select the same scan
* servers for a given tablet.</li>
* <li>Use a per attempt configurable busy timeout.</li>
* </ul>
*
* <p>
* This class accepts a single configuration that has a json value. To configure this class set
* {@code scan.server.selector.opts.profiles=<json>} in the accumulo client configuration along with
* the config for the class. The following is the default configuration value.
* </p>
* <p>
* {@value ConfigurableScanServerSelector#PROFILES_DEFAULT}
* </p>
*
* The json is structured as a list of profiles, with each profile having the following fields.
*
* <ul>
* <li><b>isDefault : </b> A boolean that specifies whether this is the default profile. One and
* only one profile must set this to true.</li>
* <li><b>maxBusyTimeout : </b> The maximum busy timeout to use. The busy timeout from the last
* attempt configuration grows exponentially up to this max.</li>
* <li><b>scanTypeActivations : </b> A list of scan types that will activate this profile. Scan
* types are specified by setting {@code scan_type=<scan_type>} as execution on the scanner. See
* {@link org.apache.accumulo.core.client.ScannerBase#setExecutionHints(Map)}</li>
* <li><b>group : </b> Scan servers can be started with an optional group. If specified, this option
* will limit the scan servers used to those that were started with this group name. If not
* specified, the set of scan servers that did not specify a group will be used. Grouping scan
* servers supports at least two use cases. First groups can be used to dedicate resources for
* certain scans. Second groups can be used to have different hardware/VM types for scans, for
* example could have some scans use expensive high memory VMs and others use cheaper burstable VMs.
* <li><b>attemptPlans : </b> A list of configuration to use for each scan attempt. Each list object
* has the following fields:
* <ul>
* <li><b>servers : </b> The number of servers to randomly choose from for this attempt.</li>
* <li><b>busyTimeout : </b> The busy timeout to use for this attempt.</li>
* <li><b>salt : </b> An optional string to append when hashing the tablet. When this is set
* differently for attempts it has the potential to cause the set of servers chosen from to be
* disjoint. When not set or the same, the servers between attempts will be subsets.</li>
* </ul>
* </li>
* </ul>
*
* <p>
* Below is an example configuration with two profiles, one is the default and the other is used
* when the scan execution hint {@code scan_type=slow} is set.
* </p>
*
* <pre>
* [
* {
* "isDefault":true,
* "maxBusyTimeout":"5m",
* "busyTimeoutMultiplier":4,
* "attemptPlans":[
* {"servers":"3", "busyTimeout":"33ms"},
* {"servers":"100%", "busyTimeout":"100ms"}
* ]
* },
* {
* "scanTypeActivations":["slow"],
* "maxBusyTimeout":"20m",
* "busyTimeoutMultiplier":8,
* "group":"lowcost",
* "attemptPlans":[
* {"servers":"1", "busyTimeout":"10s"},
* {"servers":"3", "busyTimeout":"30s","salt":"42"},
* {"servers":"9", "busyTimeout":"60s","salt":"84"}
* ]
* }
* ]
* </pre>
*
* <p>
* For the default profile in the example it will start off by choosing randomly from 3 scan servers
* based on a hash of the tablet with no salt. For the first attempt it will use a busy timeout of
* 33 milliseconds. If the first attempt returns with busy, then it will randomly choose from 100%
* or all servers for the second attempt and use a busy timeout of 100ms. For subsequent attempts it
* will keep choosing from all servers and start multiplying the busy timeout by 4 until the max
 * busy timeout of 5 minutes is reached.
* </p>
*
* <p>
 * For the profile activated by {@code scan_type=slow} it starts off by choosing randomly from 1
* server based on a hash of the tablet with no salt and a busy timeout of 10s. The second attempt
* will choose from 3 scan servers based on a hash of the tablet plus the salt {@literal 42}.
* Without the salt, the single scan servers from the first attempt would always be included in the
* set of 3. With the salt the single scan server from the first attempt may not be included. The
* third attempt will choose a scan server from 9 using the salt {@literal 84} and a busy timeout of
* 60s. The different salt means the set of servers that attempts 2 and 3 choose from may be
* disjoint. Attempt 4 and greater will continue to choose from the same 9 servers as attempt 3 and
 * will keep increasing the busy timeout by multiplying by 8 until the maximum of 20 minutes is
* reached. For this profile it will choose from scan servers in the group {@literal lowcost}.
* </p>
*
* @since 2.1.0
*/
public class ConfigurableScanServerSelector implements ScanServerSelector {
  public static final String PROFILES_DEFAULT = "[{'isDefault':true,'maxBusyTimeout':'5m',"
      + "'busyTimeoutMultiplier':8, 'scanTypeActivations':[], "
      + "'attemptPlans':[{'servers':'3', 'busyTimeout':'33ms', 'salt':'one'},"
      + "{'servers':'13', 'busyTimeout':'33ms', 'salt':'two'},"
      + "{'servers':'100%', 'busyTimeout':'33ms'}]}]";

  // Periodically refreshed snapshot of scan server addresses, grouped by group name. Each group is
  // sorted by address so different clients hashing the same tablet see the same ordering.
  private Supplier<Map<String,List<String>>> orderedScanServersSupplier;

  // Maps a scan type activation string to its profile; populated by parseProfiles().
  private Map<String,Profile> profiles;
  private Profile defaultProfile;

  private static final Set<String> OPT_NAMES = Set.of("profiles");

  /**
   * Configuration for one scan attempt within a profile: how many servers to choose from, the busy
   * timeout to use, and an optional salt for the tablet hash. Fields are populated by GSON from
   * json, then parsed lazily into the transient fields.
   */
  @SuppressFBWarnings(value = {"NP_UNWRITTEN_FIELD", "UWF_UNWRITTEN_FIELD"},
      justification = "Object deserialized by GSON")
  private static class AttemptPlan {
    String servers;
    String busyTimeout;
    String salt = "";

    // Derived values computed from the json string fields by parse().
    transient double serversRatio;
    transient int parsedServers;
    transient boolean isServersPercent;
    transient boolean parsed = false;
    transient long parsedBusyTimeout;

    // Lazily parses and validates the string config fields; idempotent.
    void parse() {
      if (parsed) {
        return;
      }
      if (servers.endsWith("%")) {
        // percentage form like "50%" is converted to a ratio in the range [0,1]
        serversRatio = Double.parseDouble(servers.substring(0, servers.length() - 1)) / 100.0;
        if (serversRatio < 0 || serversRatio > 1) {
          throw new IllegalArgumentException("Bad servers percentage : " + servers);
        }
        isServersPercent = true;
      } else {
        parsedServers = Integer.parseInt(servers);
        if (parsedServers <= 0) {
          throw new IllegalArgumentException("Server must be positive : " + servers);
        }
        isServersPercent = false;
      }
      parsedBusyTimeout = ConfigurationTypeHelper.getTimeInMillis(busyTimeout);
      parsed = true;
    }

    // Number of the currently available servers to choose from; at least 1 for percentage form and
    // never more than totalServers for the absolute form.
    int getNumServers(int totalServers) {
      parse();
      if (isServersPercent) {
        return Math.max(1, (int) Math.round(serversRatio * totalServers));
      } else {
        return Math.min(totalServers, parsedServers);
      }
    }

    long getBusyTimeout() {
      parse();
      return parsedBusyTimeout;
    }
  }

  /**
   * One configuration profile, selected via the {@code scan_type} execution hint. Fields are
   * populated by GSON from json.
   */
  @SuppressFBWarnings(value = {"NP_UNWRITTEN_PUBLIC_OR_PROTECTED_FIELD", "UWF_UNWRITTEN_FIELD"},
      justification = "Object deserialized by GSON")
  private static class Profile {
    public List<AttemptPlan> attemptPlans;
    List<String> scanTypeActivations;
    boolean isDefault = false;
    int busyTimeoutMultiplier;
    String maxBusyTimeout;
    String group = ScanServerSelector.DEFAULT_SCAN_SERVER_GROUP_NAME;

    transient boolean parsed = false;
    transient long parsedMaxBusyTimeout;

    // Attempts beyond the last configured plan reuse the last plan's server count.
    int getNumServers(int attempt, int totalServers) {
      int index = Math.min(attempt, attemptPlans.size() - 1);
      return attemptPlans.get(index).getNumServers(totalServers);
    }

    // Lazily parses maxBusyTimeout; idempotent.
    void parse() {
      if (parsed) {
        return;
      }
      parsedMaxBusyTimeout = ConfigurationTypeHelper.getTimeInMillis(maxBusyTimeout);
      parsed = true;
    }

    // For attempts beyond the configured plans, the busy timeout of the last plan grows
    // exponentially using busyTimeoutMultiplier, capped at maxBusyTimeout.
    long getBusyTimeout(int attempt) {
      int index = Math.min(attempt, attemptPlans.size() - 1);
      long busyTimeout = attemptPlans.get(index).getBusyTimeout();
      if (attempt >= attemptPlans.size()) {
        parse();
        busyTimeout = (long) (busyTimeout
            * Math.pow(busyTimeoutMultiplier, attempt - attemptPlans.size() + 1));
        busyTimeout = Math.min(busyTimeout, parsedMaxBusyTimeout);
      }
      return busyTimeout;
    }

    // Attempts beyond the last configured plan reuse the last plan's salt.
    public String getSalt(int attempts) {
      int index = Math.min(attempts, attemptPlans.size() - 1);
      return attemptPlans.get(index).salt;
    }
  }

  // Parses the json "profiles" option, indexing profiles by scan type activation and locating the
  // single required default profile. Throws IllegalArgumentException on duplicate activations,
  // multiple defaults, or a missing default.
  private void parseProfiles(Map<String,String> options) {
    Type listType = new TypeToken<ArrayList<Profile>>() {}.getType();
    List<Profile> profList =
        GSON.get().fromJson(options.getOrDefault("profiles", PROFILES_DEFAULT), listType);
    profiles = new HashMap<>();
    defaultProfile = null;
    for (Profile prof : profList) {
      if (prof.scanTypeActivations != null) {
        for (String scanType : prof.scanTypeActivations) {
          if (profiles.put(scanType, prof) != null) {
            throw new IllegalArgumentException(
                "Scan type activation seen in multiple profiles : " + scanType);
          }
        }
      }
      if (prof.isDefault) {
        if (defaultProfile != null) {
          throw new IllegalArgumentException("Multiple default profiles seen");
        }
        defaultProfile = prof;
      }
    }
    if (defaultProfile == null) {
      throw new IllegalArgumentException("No default profile specified");
    }
  }

  @Override
  public void init(ScanServerSelector.InitParameters params) {
    // avoid constantly resorting the scan servers, just do it periodically in case they change
    orderedScanServersSupplier = Suppliers.memoizeWithExpiration(() -> {
      Collection<ScanServerInfo> scanServers = params.getScanServers().get();
      Map<String,List<String>> groupedServers = new HashMap<>();
      scanServers.forEach(sserver -> groupedServers
          .computeIfAbsent(sserver.getGroup(), k -> new ArrayList<>()).add(sserver.getAddress()));
      groupedServers.values().forEach(ssAddrs -> Collections.sort(ssAddrs));
      return groupedServers;
    }, 100, TimeUnit.MILLISECONDS);
    var opts = params.getOptions();
    var diff = Sets.difference(opts.keySet(), OPT_NAMES);
    Preconditions.checkArgument(diff.isEmpty(), "Unknown options %s", diff);
    parseProfiles(params.getOptions());
  }

  @Override
  public ScanServerSelections selectServers(ScanServerSelector.SelectorParameters params) {
    String scanType = params.getHints().get("scan_type");

    // Pick the profile activated by the scan type hint, if any; otherwise use the default.
    Profile profile = null;
    if (scanType != null) {
      profile = profiles.getOrDefault(scanType, defaultProfile);
    } else {
      profile = defaultProfile;
    }

    // only get this once and use it for the entire method so that the method uses a consistent
    // snapshot
    List<String> orderedScanServers =
        orderedScanServersSupplier.get().getOrDefault(profile.group, List.of());

    if (orderedScanServers.isEmpty()) {
      // No scan servers available in the profile's group; returning null per tablet falls back to
      // the tablet servers.
      return new ScanServerSelections() {
        @Override
        public String getScanServer(TabletId tabletId) {
          return null;
        }

        @Override
        public Duration getDelay() {
          return Duration.ZERO;
        }

        @Override
        public Duration getBusyTimeout() {
          return Duration.ZERO;
        }
      };
    }

    Map<TabletId,String> serversToUse = new HashMap<>();

    // get the max number of busy attempts, treat errors as busy attempts
    int attempts = params.getTablets().stream()
        .mapToInt(tablet -> params.getAttempts(tablet).size()).max().orElse(0);

    int numServers = profile.getNumServers(attempts, orderedScanServers.size());

    for (TabletId tablet : params.getTablets()) {
      String serverToUse = null;
      // Hashing the tablet anchors each tablet to a deterministic window of numServers servers
      // (shared across clients); the random offset spreads load within that window.
      var hashCode = hashTablet(tablet, profile.getSalt(attempts));
      int serverIndex = (Math.abs(hashCode.asInt()) + RANDOM.get().nextInt(numServers))
          % orderedScanServers.size();
      serverToUse = orderedScanServers.get(serverIndex);
      serversToUse.put(tablet, serverToUse);
    }

    Duration busyTO = Duration.ofMillis(profile.getBusyTimeout(attempts));

    return new ScanServerSelections() {
      @Override
      public String getScanServer(TabletId tabletId) {
        return serversToUse.get(tabletId);
      }

      @Override
      public Duration getDelay() {
        return Duration.ZERO;
      }

      @Override
      public Duration getBusyTimeout() {
        return busyTO;
      }
    };
  }

  // Hashes a tablet's end row, prev end row, and table id, plus an optional salt. Distinct
  // sentinel bytes (5 and 7) are fed to the hasher in place of null rows so the two null cases
  // contribute differently to the hash.
  private HashCode hashTablet(TabletId tablet, String salt) {
    var hasher = Hashing.murmur3_128().newHasher();

    if (tablet.getEndRow() != null) {
      hasher.putBytes(tablet.getEndRow().getBytes(), 0, tablet.getEndRow().getLength());
    } else {
      hasher.putByte((byte) 5);
    }

    if (tablet.getPrevEndRow() != null) {
      hasher.putBytes(tablet.getPrevEndRow().getBytes(), 0, tablet.getPrevEndRow().getLength());
    } else {
      hasher.putByte((byte) 7);
    }

    hasher.putString(tablet.getTable().canonical(), UTF_8);

    if (salt != null && !salt.isEmpty()) {
      hasher.putString(salt, UTF_8);
    }

    return hasher.hash();
  }
}
| 9,789 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanServerInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
/**
* Information about a scan server.
*
* @since 2.1.0
*/
public interface ScanServerInfo {

  /**
   * @return the address in the form of {@code <host>:<port>} where the scan server is running.
   */
  String getAddress();

  /**
   * @return the group name set when the scan server was started. If a group name was not set for
   *         the scan server, then the string
   *         {@value ScanServerSelector#DEFAULT_SCAN_SERVER_GROUP_NAME} is returned.
   */
  String getGroup();
}
| 9,790 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/DefaultScanDispatch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
/**
* This class is intentionally package private. Do not make public!
*
* <p>
* The purpose of this class is to avoid any object creation in the case where
* {@code ScanDispatch.builder().build()} is called.
*/
class DefaultScanDispatch extends ScanDispatchImpl {

  // The shared default instance. Declared final so the singleton cannot be accidentally
  // reassigned (the original field was mutable).
  static final DefaultScanDispatch DEFAULT_SCAN_DISPATCH = new DefaultScanDispatch();

  private DefaultScanDispatch() {
    // Finalize the inherited builder state so the getters on this instance are usable.
    super.build();
  }

  /**
   * Each setter starts a fresh builder so that customizing a dispatch never mutates this shared
   * singleton.
   */
  @Override
  public Builder setExecutorName(String name) {
    return new ScanDispatchImpl().setExecutorName(name);
  }

  @Override
  public Builder setIndexCacheUsage(CacheUsage usage) {
    return new ScanDispatchImpl().setIndexCacheUsage(usage);
  }

  @Override
  public Builder setDataCacheUsage(CacheUsage usage) {
    return new ScanDispatchImpl().setDataCacheUsage(usage);
  }

  @Override
  public ScanDispatch build() {
    // Already built and immutable, so no new object is needed.
    return this;
  }
}
| 9,791 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanDispatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Map;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import com.google.common.base.Preconditions;
/**
* A per table scan dispatcher that decides which executor should be used to process a scan. For
* information about configuring, find the documentation for the {@code table.scan.dispatcher} and
* {@code table.scan.dispatcher.opts.} properties.
*
* @since 2.0.0
* @see org.apache.accumulo.core.spi
*/
public interface ScanDispatcher {

  /**
   * The method parameters for {@link ScanDispatcher#init(InitParameters)}. This interface exists so
   * the API can evolve and additional parameters can be passed to the method in the future.
   *
   * @since 2.0.0
   */
  public interface InitParameters {
    /**
     *
     * @return The configured options. For example if the table properties
     *         {@code table.scan.dispatcher.opts.p1=abc} and
     *         {@code table.scan.dispatcher.opts.p9=123} were set, then this map would contain
     *         {@code p1=abc} and {@code p9=123}.
     */
    Map<String,String> getOptions();

    /**
     * @return the id of the table this dispatcher is configured for
     */
    TableId getTableId();

    /**
     * @return the service environment
     */
    ServiceEnvironment getServiceEnv();
  }

  /**
   * This method is called once after a ScanDispatcher is instantiated.
   */
  default void init(InitParameters params) {
    // The default implementation accepts no options.
    Preconditions.checkArgument(params.getOptions().isEmpty(), "No options expected");
  }

  /**
   * The method parameters for {@link ScanDispatcher#dispatch(DispatchParameters)}. This interface
   * exists so the API can evolve and additional parameters can be passed to the method in the
   * future.
   *
   * @since 2.1.0
   */
  public interface DispatchParameters {
    /**
     * @return information about the scan to be dispatched.
     */
    ScanInfo getScanInfo();

    /**
     * @return the currently configured scan executors
     */
    Map<String,ScanExecutor> getScanExecutors();

    /**
     * @return the service environment
     */
    ServiceEnvironment getServiceEnv();
  }

  /**
   * Accumulo calls this method for each scan batch to determine what executor to use and how to
   * utilize cache for the scan.
   *
   * @since 2.1.0
   */
  ScanDispatch dispatch(DispatchParameters params);
}
| 9,792 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanServerAttempt.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
/**
* This object is used to communicate what previous actions were attempted, when they were
* attempted, and the result of those attempts
*
* @since 2.1.0
*/
public interface ScanServerAttempt {

  /**
   * Represents the reasons that previous attempts to scan failed.
   */
  enum Result {
    /**
     * The scan server did not start running the scan within the busy timeout.
     */
    BUSY,
    /**
     * An error occurred during the scan attempt.
     */
    ERROR
  }

  /**
   * @return the scan server the attempt was made against
   */
  String getServer();

  /**
   * @return the outcome of the attempt
   */
  Result getResult();
}
| 9,793 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanDispatch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import org.apache.accumulo.core.spi.scan.ScanDispatcher.DispatchParameters;
/**
 * Encapsulates information about how a scan should be executed. This is the return type for
 * {@link ScanDispatcher#dispatch(DispatchParameters)}. To create an instance of this use
 * {@link ScanDispatch#builder()}
 *
 * @since 2.1.0
 */
public interface ScanDispatch {

  /**
   * Communicates how a scan should use cache.
   *
   * @since 2.1.0
   */
  enum CacheUsage {
    /**
     * Use cache for this scan, possibly overriding table settings.
     */
    ENABLED,
    /**
     * Do not use cache for this scan, possibly overriding table settings.
     */
    DISABLED,
    /**
     * Use data if it exists in cache, but never load data into cache.
     */
    OPPORTUNISTIC,
    /**
     * Use the tables cache settings for this scan.
     */
    TABLE
  }

  /**
   * @return the name of the scan executor the scan should run on
   */
  String getExecutorName();

  /**
   * @return how the scan should use cache for file data
   */
  CacheUsage getDataCacheUsage();

  /**
   * @return how the scan should use cache for file metadata (like the index tree within a file)
   */
  CacheUsage getIndexCacheUsage();

  /**
   * @since 2.1.0
   */
  interface Builder {
    /**
     * If this is not called, then {@value SimpleScanDispatcher#DEFAULT_SCAN_EXECUTOR_NAME} should
     * be used.
     *
     * @param name a non null name of an existing scan executor to use for this scan from the key
     *        set of {@link ScanDispatcher.DispatchParameters#getScanExecutors()}
     * @return may return self or a new object
     */
    Builder setExecutorName(String name);

    /**
     * If this is not called, then {@link CacheUsage#TABLE} should be used.
     *
     * @param usage a non null usage indicating how the scan should use cache for file metadata
     *        (like the index tree within a file)
     * @return may return self or a new object
     */
    Builder setIndexCacheUsage(CacheUsage usage);

    /**
     * If this is not called, then {@link CacheUsage#TABLE} should be used.
     *
     * @param usage a non null usage indicating how the scan should use cache for file data
     * @return may return self or a new object
     */
    Builder setDataCacheUsage(CacheUsage usage);

    /**
     * @return an immutable {@link ScanDispatch} object.
     */
    ScanDispatch build();
  }

  /**
   * @return a {@link ScanDispatch} builder
   */
  static Builder builder() {
    return DefaultScanDispatch.DEFAULT_SCAN_DISPATCH;
  }
}
| 9,794 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanDispatchImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Objects;
import org.apache.accumulo.core.spi.scan.ScanDispatch.Builder;
import com.google.common.base.Preconditions;
/**
 * This class is intentionally package private. Do not make public!
 */
class ScanDispatchImpl implements ScanDispatch, Builder {

  // A single object plays both roles: the mutable Builder before build() is called, and the
  // effectively-immutable ScanDispatch afterwards. This avoids allocating and copying a second
  // object; the two roles could be split into separate classes without changing the interfaces.

  private boolean built = false;
  private String executorName = SimpleScanDispatcher.DEFAULT_SCAN_EXECUTOR_NAME;
  private CacheUsage indexCacheUsage = CacheUsage.TABLE;
  private CacheUsage dataCacheUsage = CacheUsage.TABLE;

  ScanDispatchImpl() {}

  // Guard for getters: only legal after build() has been called.
  private void ensureBuilt() {
    if (!built) {
      throw new IllegalStateException();
    }
  }

  // Guard for setters and build(): only legal while still mutable.
  private void ensureMutable() {
    if (built) {
      throw new IllegalStateException();
    }
  }

  @Override
  public Builder setExecutorName(String name) {
    ensureMutable();
    this.executorName = Objects.requireNonNull(name);
    return this;
  }

  @Override
  public Builder setIndexCacheUsage(CacheUsage usage) {
    ensureMutable();
    this.indexCacheUsage = Objects.requireNonNull(usage);
    return this;
  }

  @Override
  public Builder setDataCacheUsage(CacheUsage usage) {
    ensureMutable();
    this.dataCacheUsage = Objects.requireNonNull(usage);
    return this;
  }

  @Override
  public ScanDispatch build() {
    ensureMutable();
    built = true;
    return this;
  }

  @Override
  public String getExecutorName() {
    ensureBuilt();
    return executorName;
  }

  @Override
  public CacheUsage getDataCacheUsage() {
    ensureBuilt();
    return dataCacheUsage;
  }

  @Override
  public CacheUsage getIndexCacheUsage() {
    ensureBuilt();
    return indexCacheUsage;
  }

  @Override
  public String toString() {
    return "{executorName=" + executorName + ", indexCacheUsage=" + indexCacheUsage
        + ", dataCacheUsage=" + dataCacheUsage + ", built=" + built + "}";
  }
}
| 9,795 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanServerSelector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Collection;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.lock.ServiceLockData;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import com.google.common.base.Preconditions;
/**
 * A client side plugin that decides which scan servers to use for eventually consistent scans.
 * This plugin is consulted for a tablet whenever a scanner has set
 * {@link org.apache.accumulo.core.client.ScannerBase#setConsistencyLevel(ScannerBase.ConsistencyLevel)}
 * to {@link org.apache.accumulo.core.client.ScannerBase.ConsistencyLevel#EVENTUAL}. The
 * implementation class is configured via the client config {@code scan.server.selector.impl}.
 *
 * @since 2.1.0
 */
public interface ScanServerSelector {

  /**
   * The scan server group name that will be used when one is not specified.
   */
  String DEFAULT_SCAN_SERVER_GROUP_NAME = ServiceLockData.ServiceDescriptor.DEFAULT_GROUP_NAME;

  /**
   * Called exactly once after a {@link ScanServerSelector} is instantiated. The default
   * implementation accepts no options.
   */
  default void init(InitParameters params) {
    if (!params.getOptions().isEmpty()) {
      throw new IllegalArgumentException("No options expected");
    }
  }

  /**
   * Holder for the arguments to {@link ScanServerSelector#init(InitParameters)}, so additional
   * parameters can be added later without breaking implementations.
   *
   * @since 2.1.0
   */
  interface InitParameters {

    /**
     * @return Options that were set in the client config using the prefix
     *         {@code scan.server.selector.opts.}. The prefix will be stripped. For example if
     *         {@code scan.server.selector.opts.k1=v1} is set in client config, then the returned
     *         map will contain {@code k1=v1}.
     */
    Map<String,String> getOptions();

    ServiceEnvironment getServiceEnv();

    /**
     * @return the set of live ScanServers. Each time the supplier is called it may return something
     *         different. A good practice would be to call this no more than once per a call to
     *         {@link ScanServerSelector#selectServers(SelectorParameters)} so that decisions are
     *         made using a consistent set of scan servers.
     */
    Supplier<Collection<ScanServerInfo>> getScanServers();
  }

  /**
   * Holder for the arguments to {@link ScanServerSelector#selectServers(SelectorParameters)}, so
   * additional parameters can be added later without breaking implementations.
   *
   * @since 2.1.0
   */
  interface SelectorParameters {

    /**
     * @return the set of tablets to be scanned
     */
    Collection<TabletId> getTablets();

    /**
     * @return scan attempt information for the tablet
     */
    Collection<? extends ScanServerAttempt> getAttempts(TabletId tabletId);

    /**
     * @return any hints set on a scanner using
     *         {@link org.apache.accumulo.core.client.ScannerBase#setExecutionHints(Map)}. If none
     *         were set, an empty map is returned.
     */
    Map<String,String> getHints();
  }

  /**
   * Uses the {@link SelectorParameters} to determine which, if any, ScanServer should be used for
   * scanning a tablet.
   *
   * @param params parameters for the calculation
   * @return results
   */
  ScanServerSelections selectServers(SelectorParameters params);
}
| 9,796 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/IdleRatioScanPrioritizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Comparator;
import com.google.common.base.Preconditions;
/**
 * Prioritize scans based on the ratio of runTime/idleTime. Scans with a lower ratio have a higher
 * priority. When the ratio is equal, the scan with the oldest last run time has the highest
 * priority. If neither have run, then the oldest gets priority.
 *
 * @since 2.0.0
 */
public class IdleRatioScanPrioritizer implements ScanPrioritizer {

  // Total run time divided by total idle time; idle time is clamped to at least 1 so the
  // division is always well defined.
  private static double runToIdleRatio(long now, ScanInfo info) {
    double totalRun = info.getRunTimeStats().sum();
    double totalIdle = Math.max(1, info.getIdleTimeStats(now).sum());
    return totalRun / totalIdle;
  }

  @Override
  public Comparator<ScanInfo> createComparator(CreateParameters params) {
    // This prioritizer takes no options.
    if (!params.getOptions().isEmpty()) {
      throw new IllegalArgumentException();
    }
    Comparator<ScanInfo> byRatio = (a, b) -> {
      long now = System.currentTimeMillis();
      return Double.compare(runToIdleRatio(now, a), runToIdleRatio(now, b));
    };
    // Break ratio ties by the oldest last run time; scans that never ran report 0 there and
    // fall through to the oldest creation time.
    return byRatio.thenComparingLong(info -> info.getLastRunTime().orElse(0))
        .thenComparingLong(ScanInfo::getCreationTime);
  }
}
| 9,797 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Map;
import java.util.Optional;
/**
 * Interface for obtaining information about a scan executor
 *
 * @since 2.0.0
 */
public interface ScanExecutor {

  /**
   * The configuration a scan executor was created with.
   */
  interface Config {

    /**
     * @return the unique name used to identified executor in config
     */
    String getName();

    /**
     * @return the max number of threads that were configured
     */
    int getMaxThreads();

    /**
     * @return the prioritizer that was configured
     */
    Optional<String> getPrioritizerClass();

    /**
     * @return the prioritizer options
     */
    Map<String,String> getPrioritizerOptions();
  }

  /**
   * @return The configuration used to create the executor
   */
  Config getConfig();

  /**
   * @return The number of task queued for the executor
   */
  int getQueued();
}
| 9,798 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/ScanPrioritizer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Comparator;
import java.util.Map;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
/**
 * A factory for creating comparators used for prioritizing scans. For information about
 * configuring, find the documentation for the {@code tserver.scan.executors.} property.
 *
 * @since 2.0.0
 * @see org.apache.accumulo.core.spi
 */
public interface ScanPrioritizer {

  /**
   * The method parameters for {@link ScanPrioritizer#createComparator(CreateParameters)}. This
   * interface exists so the API can evolve and additional parameters can be passed to the method in
   * the future.
   *
   * @since 2.0.0
   */
  interface CreateParameters {

    /**
     * @return The options configured for the scan prioritizer with properties of the form
     *         {@code tserver.scan.executors.<name>.prioritizer.opts.<key>=<value>}. Only the
     *         {@code <key>=<value>} portions of those properties ends up in the returned map.
     */
    Map<String,String> getOptions();

    ServiceEnvironment getServiceEnv();
  }

  /**
   * @param params the configured options and environment to build the comparator from
   * @return a comparator used to prioritize scans
   */
  Comparator<ScanInfo> createComparator(CreateParameters params);
}
| 9,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.