index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/SaslClientDigestCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import static java.util.Objects.requireNonNull;
import java.util.Arrays;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.RealmChoiceCallback;
import org.apache.accumulo.core.clientImpl.DelegationTokenImpl;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client-side callbackhandler for sasl authentication which is the client-side sibling to the
 * server-side {@link SaslDigestCallbackHandler}. Encoding of name, password and realm information
 * must be consistent across the pair.
 */
public class SaslClientDigestCallbackHandler extends SaslDigestCallbackHandler {
  private static final Logger log = LoggerFactory.getLogger(SaslClientDigestCallbackHandler.class);
  private static final String NAME = SaslClientDigestCallbackHandler.class.getSimpleName();

  // Encoded token identifier, used as the SASL user name
  private final String userName;
  // Encoded token password; compared with Arrays.equals() in equals()
  private final char[] userPassword;

  /**
   * Creates a handler for the given delegation token, encoding its identifier and password with
   * the inherited encode methods so that the server side can decode them consistently.
   *
   * @param token delegation token to authenticate with, must not be null
   */
  public SaslClientDigestCallbackHandler(DelegationTokenImpl token) {
    requireNonNull(token);
    this.userName = encodeIdentifier(token.getIdentifier().getBytes());
    this.userPassword = encodePassword(token.getPassword());
  }

  /**
   * Creates a handler from an already-encoded user name and password.
   *
   * @param userName encoded identifier, must not be null
   * @param userPassword encoded password, must not be null
   */
  public SaslClientDigestCallbackHandler(String userName, char[] userPassword) {
    requireNonNull(userName);
    requireNonNull(userPassword);
    this.userName = userName;
    this.userPassword = userPassword;
  }

  /**
   * Answers the SASL callbacks with the stored name and password, and echoes the server's
   * default realm back for realm callbacks.
   *
   * @throws UnsupportedCallbackException if an unrecognized callback type is encountered
   */
  @Override
  public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
    NameCallback nc = null;
    PasswordCallback pc = null;
    RealmCallback rc = null;
    // First pass: sort the callbacks by type, failing fast on anything unrecognized
    for (Callback callback : callbacks) {
      if (callback instanceof RealmChoiceCallback) {
        continue;
      } else if (callback instanceof NameCallback) {
        nc = (NameCallback) callback;
      } else if (callback instanceof PasswordCallback) {
        pc = (PasswordCallback) callback;
      } else if (callback instanceof RealmCallback) {
        rc = (RealmCallback) callback;
      } else {
        throw new UnsupportedCallbackException(callback, "Unrecognized SASL client callback");
      }
    }
    if (nc != null) {
      log.debug("SASL client callback: setting username: {}", userName);
      nc.setName(userName);
    }
    if (pc != null) {
      // Deliberately not logging the password itself
      log.debug("SASL client callback: setting userPassword");
      pc.setPassword(userPassword);
    }
    if (rc != null) {
      log.debug("SASL client callback: setting realm: {}", rc.getDefaultText());
      rc.setText(rc.getDefaultText());
    }
  }

  @Override
  public String toString() {
    return NAME;
  }

  @Override
  public int hashCode() {
    HashCodeBuilder hcb = new HashCodeBuilder(41, 47);
    hcb.append(userName).append(userPassword);
    return hcb.toHashCode();
  }

  @Override
  public boolean equals(Object o) {
    // Fast path for the common reflexive comparison
    if (this == o) {
      return true;
    }
    // instanceof is null-safe, so no separate null check is required
    if (o instanceof SaslClientDigestCallbackHandler) {
      SaslClientDigestCallbackHandler other = (SaslClientDigestCallbackHandler) o;
      return userName.equals(other.userName) && Arrays.equals(userPassword, other.userPassword);
    }
    return false;
  }
}
| 9,900 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransportFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import static java.util.Objects.requireNonNull;
import java.security.PrivilegedAction;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.apache.thrift.transport.TTransportFactory;
/**
 * A TransportFactory that wraps another one, but assumes a specified UGI before calling through.
 *
 * This is used on the server side to assume the server's Principal when accepting clients.
 *
 * Borrowed from Apache Hive 0.14
 */
public class UGIAssumingTransportFactory extends TTransportFactory {
  private final UserGroupInformation ugi;
  private final TTransportFactory wrapped;

  /**
   * @param wrapped the factory that actually constructs transports
   * @param ugi the UserGroupInformation to assume while constructing them
   */
  public UGIAssumingTransportFactory(TTransportFactory wrapped, UserGroupInformation ugi) {
    this.wrapped = requireNonNull(wrapped);
    this.ugi = requireNonNull(ugi);
  }

  @Override
  public TTransport getTransport(final TTransport trans) {
    // Delegate to the wrapped factory, but inside a doAs() so the configured UGI is active
    PrivilegedAction<TTransport> wrapAction = () -> {
      try {
        return wrapped.getTransport(trans);
      } catch (TTransportException e) {
        // A PrivilegedAction cannot throw a checked exception; surface it unchecked
        throw new IllegalStateException(e);
      }
    };
    return ugi.doAs(wrapAction);
  }
}
| 9,901 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/TraceProtocolFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TMessage;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TTransport;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
/**
 * {@link org.apache.thrift.protocol.TCompactProtocol.Factory} implementation which uses a protocol
 * which traces
 */
public class TraceProtocolFactory extends TCompactProtocol.Factory {
  private static final long serialVersionUID = 1L;

  @Override
  public TProtocol getProtocol(TTransport trans) {
    return new TCompactProtocol(trans) {
      // Span and scope for the in-flight RPC; non-null only between a writeMessageBegin
      // and its matching writeMessageEnd
      private Span span = null;
      private Scope scope = null;

      @Override
      public void writeMessageBegin(TMessage message) throws TException {
        span = TraceUtil.startClientRpcSpan(this.getClass(), message.name);
        scope = span.makeCurrent();
        super.writeMessageBegin(message);
      }

      @Override
      public void writeMessageEnd() throws TException {
        super.writeMessageEnd();
        // Guard against an end without a matching begin (e.g. span creation threw before
        // the scope was set) and against a double end, which would otherwise NPE; clear
        // the fields so stale trace state cannot leak into the next message.
        if (scope != null) {
          scope.close();
          scope = null;
        }
        if (span != null) {
          span.end();
          span = null;
        }
      }
    };
  }
}
| 9,902 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/AccumuloTFramedTransportFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.apache.thrift.transport.layered.TFramedTransport;
/**
 * This is a workaround for the issue reported in https://issues.apache.org/jira/browse/THRIFT-5732
 * and can be removed once that issue is fixed.
 */
public class AccumuloTFramedTransportFactory extends TFramedTransport.Factory {

  // The single limit applied to both the max message size and the max frame size
  private final int maxMessageSize;

  public AccumuloTFramedTransportFactory(int maxMessageSize) {
    super(maxMessageSize);
    this.maxMessageSize = maxMessageSize;
  }

  @Override
  public TTransport getTransport(TTransport base) throws TTransportException {
    // "base" is typically a TSocket connecting two Accumulo endpoints (client-server or
    // server-server). Per current Thrift docs a message must fit inside one frame, and the
    // TFramedTransport constructor takes its frame size from the underlying transport's
    // configuration, choosing the lower of the two limits. Historically only the frame size
    // was set here, so messages were still capped by the default maxMessageSize (100MB)
    // rather than the configured value. Aligning both limits on the base transport before
    // calling super.getTransport() ensures the framed transport ends up with the size we
    // actually want.
    base.getConfiguration().setMaxMessageSize(maxMessageSize);
    base.getConfiguration().setMaxFrameSize(maxMessageSize);
    return super.getTransport(base);
  }
}
| 9,903 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/SaslConnectionParams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import javax.security.auth.callback.CallbackHandler;
import javax.security.sasl.Sasl;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.accumulo.core.clientImpl.ClientConfConverter;
import org.apache.accumulo.core.clientImpl.DelegationTokenImpl;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.Property;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Connection parameters for setting up a TSaslTransportFactory
 */
public class SaslConnectionParams {
  private static final Logger log = LoggerFactory.getLogger(SaslConnectionParams.class);

  /**
   * Enumeration around {@link Sasl#QOP}
   */
  public enum QualityOfProtection {
    AUTH("auth"), AUTH_INT("auth-int"), AUTH_CONF("auth-conf");

    // The exact string Sasl expects for this QOP level
    private final String quality;

    private QualityOfProtection(String quality) {
      this.quality = quality;
    }

    public String getQuality() {
      return quality;
    }

    /**
     * Returns the enum value whose Sasl QOP string (e.g. "auth-conf") matches {@code name}.
     *
     * @throws IllegalArgumentException if the name matches no known QOP value
     */
    public static QualityOfProtection get(String name) {
      if (AUTH.quality.equals(name)) {
        return AUTH;
      } else if (AUTH_INT.quality.equals(name)) {
        return AUTH_INT;
      } else if (AUTH_CONF.quality.equals(name)) {
        return AUTH_CONF;
      }
      throw new IllegalArgumentException("No value for " + name);
    }

    @Override
    public String toString() {
      return quality;
    }
  }

  /**
   * The SASL mechanism to use for authentication
   */
  public enum SaslMechanism {
    GSSAPI("GSSAPI"), // Kerberos
    DIGEST_MD5("DIGEST-MD5"); // Delegation Tokens

    private final String mechanismName;

    private SaslMechanism(String mechanismName) {
      this.mechanismName = mechanismName;
    }

    public String getMechanismName() {
      return mechanismName;
    }

    /**
     * Returns the enum value whose mechanism name matches {@code mechanismName}.
     *
     * @throws IllegalArgumentException if the name matches no known mechanism
     */
    public static SaslMechanism get(String mechanismName) {
      if (GSSAPI.mechanismName.equals(mechanismName)) {
        return GSSAPI;
      } else if (DIGEST_MD5.mechanismName.equals(mechanismName)) {
        return DIGEST_MD5;
      }
      throw new IllegalArgumentException("No value for " + mechanismName);
    }
  }

  private static String defaultRealm;

  static {
    // Resolve the default Kerberos realm once at class load; fall back to a sentinel when
    // no krb5 configuration is available (e.g. non-kerberized environments).
    try {
      defaultRealm = KerberosUtil.getDefaultRealm();
    } catch (Exception ke) {
      log.debug("Kerberos krb5 configuration not found, setting default realm to empty");
      defaultRealm = "UNKNOWN";
    }
  }

  // These fields are populated by the update* methods invoked from the constructor, in
  // order: principal from UGI, then qop/kerberosServerPrimary from the configuration,
  // then mechanism/callbackHandler from the token. Protected so subclasses can adjust them.
  protected String principal;
  protected QualityOfProtection qop;
  protected String kerberosServerPrimary;
  protected SaslMechanism mechanism;
  protected CallbackHandler callbackHandler;
  protected final Map<String,String> saslProperties;

  public SaslConnectionParams(AccumuloConfiguration conf, AuthenticationToken token) {
    this(ClientConfConverter.toProperties(conf), token);
  }

  public SaslConnectionParams(Properties properties, AuthenticationToken token) {
    requireNonNull(properties, "Properties was null");
    requireNonNull(token, "AuthenticationToken was null");
    saslProperties = new HashMap<>();
    updatePrincipalFromUgi();
    updateFromConfiguration(properties);
    updateFromToken(token);
  }

  /**
   * Factory method that returns {@code null} when SASL is not enabled in the configuration,
   * otherwise a fully-populated instance.
   */
  public static SaslConnectionParams from(AccumuloConfiguration config, AuthenticationToken token) {
    if (!config.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
      return null;
    }
    return new SaslConnectionParams(config, token);
  }

  /**
   * Chooses the SASL mechanism and callback handler from the token type: Kerberos tokens use
   * GSSAPI (no handler), delegation tokens use DIGEST-MD5 with a digest callback handler.
   *
   * @throws IllegalArgumentException for any other token type
   */
  protected void updateFromToken(AuthenticationToken token) {
    if (token instanceof KerberosToken) {
      mechanism = SaslMechanism.GSSAPI;
      // No callbackhandlers necessary for GSSAPI
      callbackHandler = null;
    } else if (token instanceof DelegationTokenImpl) {
      mechanism = SaslMechanism.DIGEST_MD5;
      callbackHandler = new SaslClientDigestCallbackHandler((DelegationTokenImpl) token);
    } else {
      throw new IllegalArgumentException(
          "Cannot determine SASL mechanism for token class: " + token.getClass());
    }
  }

  /**
   * Sets {@link #principal} from the current Hadoop UGI user.
   *
   * @throws IllegalStateException if Hadoop security is not enabled or no username is available
   */
  protected void updatePrincipalFromUgi() {
    // Ensure we're using Kerberos auth for Hadoop UGI
    if (!UserGroupInformation.isSecurityEnabled()) {
      throw new IllegalStateException("Cannot use SASL if Hadoop security is not enabled");
    }
    // Get the current user
    UserGroupInformation currentUser;
    try {
      currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
      throw new UncheckedIOException("Failed to get current user", e);
    }
    // The full name is our principal
    this.principal = currentUser.getUserName();
    if (this.principal == null) {
      throw new IllegalStateException("Got null username from " + currentUser);
    }
  }

  /**
   * Sets {@link #qop}, {@link #kerberosServerPrimary} and the cached SASL property map from the
   * given client properties.
   */
  protected void updateFromConfiguration(Properties properties) {
    // Get the quality of protection to use
    final String qopValue = ClientProperty.SASL_QOP.getValue(properties);
    this.qop = QualityOfProtection.get(qopValue);
    // Add in the SASL properties to a map so we don't have to repeatedly construct this map
    this.saslProperties.put(Sasl.QOP, this.qop.getQuality());
    // The primary from the KRB principal on each server (e.g. primary/instance@realm)
    this.kerberosServerPrimary = ClientProperty.SASL_KERBEROS_SERVER_PRIMARY.getValue(properties);
  }

  /**
   * An unmodifiable view of the SASL properties (e.g. the {@link Sasl#QOP} entry).
   */
  public Map<String,String> getSaslProperties() {
    return Collections.unmodifiableMap(saslProperties);
  }

  /**
   * The quality of protection used with SASL. See {@link Sasl#QOP} for more information.
   */
  public QualityOfProtection getQualityOfProtection() {
    return qop;
  }

  /**
   * The 'primary' component from the Kerberos principals that servers are configured to use.
   */
  public String getKerberosServerPrimary() {
    return kerberosServerPrimary;
  }

  /**
   * The principal of the logged in user for SASL
   */
  public String getPrincipal() {
    return principal;
  }

  /**
   * The SASL mechanism to use for authentication
   */
  public SaslMechanism getMechanism() {
    return mechanism;
  }

  /**
   * The SASL callback handler for this mechanism, may be null.
   */
  public CallbackHandler getCallbackHandler() {
    return callbackHandler;
  }

  @Override
  public int hashCode() {
    HashCodeBuilder hcb = new HashCodeBuilder(23, 29);
    hcb.append(kerberosServerPrimary).append(saslProperties).append(qop.hashCode())
        .append(principal).append(mechanism).append(callbackHandler);
    return hcb.toHashCode();
  }

  @Override
  public boolean equals(Object o) {
    // Field-by-field comparison, kept in sync with the fields appended in hashCode()
    if (o instanceof SaslConnectionParams) {
      SaslConnectionParams other = (SaslConnectionParams) o;
      if (!kerberosServerPrimary.equals(other.kerberosServerPrimary)) {
        return false;
      }
      if (qop != other.qop) {
        return false;
      }
      if (!principal.equals(other.principal)) {
        return false;
      }
      if (!mechanism.equals(other.mechanism)) {
        return false;
      }
      // callbackHandler may legitimately be null (GSSAPI), so use a null-safe comparison
      if (!Objects.equals(callbackHandler, other.callbackHandler)) {
        return false;
      }
      return saslProperties.equals(other.saslProperties);
    }
    return false;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder(64);
    sb.append("SaslConnectionParams[").append("kerberosServerPrimary=")
        .append(kerberosServerPrimary).append(", qualityOfProtection=").append(qop);
    sb.append(", principal=").append(principal).append(", mechanism=").append(mechanism)
        .append(", callbackHandler=").append(callbackHandler).append("]");
    return sb.toString();
  }

  /**
   * The default Kerberos realm resolved at class load, or "UNKNOWN" if unresolvable.
   */
  public static String getDefaultRealm() {
    return defaultRealm;
  }
}
| 9,904 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/SaslDigestCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import java.util.Base64;
import javax.security.auth.callback.CallbackHandler;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
/**
 * Common serialization methods across the client and server callback handlers for SASL.
 * Serialization and deserialization methods must be kept in sync.
 */
public abstract class SaslDigestCallbackHandler implements CallbackHandler {

  /**
   * Encodes a serialized {@link TokenIdentifier} as a Base64 {@link String}.
   *
   * @param identifier The serialized identifier
   * @see #decodeIdentifier(String)
   */
  public String encodeIdentifier(byte[] identifier) {
    Base64.Encoder encoder = Base64.getEncoder();
    return encoder.encodeToString(identifier);
  }

  /**
   * Encodes the token password bytes into Base64 characters.
   *
   * @param password The token password
   * @see #getPassword(SecretManager, TokenIdentifier)
   */
  public char[] encodePassword(byte[] password) {
    String encoded = Base64.getEncoder().encodeToString(password);
    return encoded.toCharArray();
  }

  /**
   * Computes the encoded password for a token identifier using the provided
   * {@link SecretManager}.
   *
   * @param secretManager The server SecretManager
   * @param tokenid The TokenIdentifier from the client
   * @throws InvalidToken if the secret manager rejects the identifier
   * @see #encodePassword(byte[])
   */
  public <T extends TokenIdentifier> char[] getPassword(SecretManager<T> secretManager, T tokenid)
      throws InvalidToken {
    byte[] rawPassword = secretManager.retrievePassword(tokenid);
    return encodePassword(rawPassword);
  }

  /**
   * Reverses {@link #encodeIdentifier(byte[])}, recovering the serialized identifier bytes.
   *
   * @param identifier The encoded, serialized {@link TokenIdentifier}
   */
  public byte[] decodeIdentifier(String identifier) {
    Base64.Decoder decoder = Base64.getDecoder();
    return decoder.decode(identifier);
  }
}
| 9,905 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/TTimeoutTransport.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.nio.channels.spi.SelectorProvider;
import org.apache.hadoop.net.NetUtils;
import org.apache.thrift.transport.TIOStreamTransport;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
/**
 * A utility class for setting up a {@link TTransport} with various necessary configurations for
 * ideal performance in Accumulo. These configurations include:
 * <ul>
 * <li>Setting SO_LINGER=false on the socket.</li>
 * <li>Setting TCP_NO_DELAY=true on the socket.</li>
 * <li>Setting timeouts on the I/OStreams.</li>
 * </ul>
 */
public class TTimeoutTransport {
  private static final Logger log = LoggerFactory.getLogger(TTimeoutTransport.class);

  // Stateless singleton; instance methods (rather than statics) appear intended to let the
  // "Visible for testing" hooks below be overridden in tests — confirm against the test code
  private static final TTimeoutTransport INSTANCE = new TTimeoutTransport();

  private TTimeoutTransport() {}

  /**
   * Creates a Thrift TTransport to the given address with the given timeout. All created resources
   * are closed if an exception is thrown.
   *
   * @param addr The address to connect the client to
   * @param timeoutMillis The timeout in milliseconds for the connection
   * @return A TTransport connected to the given <code>addr</code>
   * @throws TTransportException If the transport fails to be created/connected
   */
  public static TTransport create(HostAndPort addr, long timeoutMillis) throws TTransportException {
    return INSTANCE.createInternal(new InetSocketAddress(addr.getHost(), addr.getPort()),
        timeoutMillis);
  }

  /**
   * Opens a socket to the given <code>addr</code>, configures the socket, and then creates a Thrift
   * transport using the socket.
   *
   * @param addr The address the socket should connect
   * @param timeoutMillis The socket timeout in milliseconds
   * @return A TTransport instance to the given <code>addr</code>
   * @throws TTransportException If the Thrift client is failed to be connected/created
   */
  TTransport createInternal(SocketAddress addr, long timeoutMillis) throws TTransportException {
    Socket socket = null;
    try {
      socket = openSocket(addr, (int) timeoutMillis);
    } catch (IOException e) {
      // openSocket handles closing the Socket on error
      ThriftUtil.checkIOExceptionCause(e);
      throw new TTransportException(e);
    }
    // Should be non-null
    assert socket != null;
    // Set up the streams
    try {
      InputStream input = wrapInputStream(socket, timeoutMillis);
      OutputStream output = wrapOutputStream(socket, timeoutMillis);
      return new TIOStreamTransport(input, output);
    } catch (IOException e) {
      // The socket is already connected here, so it must be closed before rethrowing
      closeSocket(socket, e);
      ThriftUtil.checkIOExceptionCause(e);
      throw new TTransportException(e);
    } catch (TTransportException e) {
      closeSocket(socket, e);
      throw e;
    }
  }

  // Best-effort close of the socket after a failure; a failure to close is suppressed onto
  // the original exception (and logged) rather than masking it
  private void closeSocket(Socket socket, Exception e) {
    try {
      if (socket != null) {
        socket.close();
      }
    } catch (IOException ioe) {
      e.addSuppressed(ioe);
      log.error("Failed to close socket after unsuccessful I/O stream setup", e);
    }
  }

  // Visible for testing
  // Wraps the socket's input stream with Hadoop's timeout-aware stream and a 10KB buffer
  InputStream wrapInputStream(Socket socket, long timeoutMillis) throws IOException {
    return new BufferedInputStream(NetUtils.getInputStream(socket, timeoutMillis), 1024 * 10);
  }

  // Visible for testing
  // Wraps the socket's output stream with Hadoop's timeout-aware stream and a 10KB buffer
  OutputStream wrapOutputStream(Socket socket, long timeoutMillis) throws IOException {
    return new BufferedOutputStream(NetUtils.getOutputStream(socket, timeoutMillis), 1024 * 10);
  }

  /**
   * Opens and configures a {@link Socket} for Accumulo RPC: SO_LINGER disabled and TCP_NO_DELAY
   * enabled, then connected with the given timeout.
   *
   * @param addr The address to connect the socket to
   * @param timeoutMillis The timeout in milliseconds to apply to the socket connect call
   * @return A socket connected to the given address
   * @throws IOException if the connect or configuration fails (the socket is closed first)
   */
  Socket openSocket(SocketAddress addr, int timeoutMillis) throws IOException {
    Socket socket = null;
    try {
      socket = openSocketChannel();
      socket.setSoLinger(false, 0);
      socket.setTcpNoDelay(true);
      socket.connect(addr, timeoutMillis);
      return socket;
    } catch (IOException e) {
      closeSocket(socket, e);
      throw e;
    }
  }

  /**
   * Opens a socket channel and returns the underlying socket.
   */
  Socket openSocketChannel() throws IOException {
    return SelectorProvider.provider().openSocketChannel().socket();
  }
}
| 9,906 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/UGIAssumingTransport.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
/**
 * The Thrift SASL transports call Sasl.createSaslServer and Sasl.createSaslClient inside open().
 * So, we need to assume the correct UGI when the transport is opened so that the SASL mechanisms
 * have access to the right principal. This transport wraps the Sasl transports to set up the right
 * UGI context for open().
 *
 * This is used on the client side, where the API explicitly opens a transport to the server.
 *
 * Lifted from Apache Hive 0.14
 */
public class UGIAssumingTransport extends FilterTransport {
  protected UserGroupInformation ugi;

  public UGIAssumingTransport(TTransport wrapped, UserGroupInformation ugi) {
    super(wrapped);
    this.ugi = ugi;
  }

  @Override
  public void open() throws TTransportException {
    // doAs() cannot propagate a checked TTransportException directly, so capture it inside
    // the privileged action and rethrow it once we are back outside.
    final AtomicReference<TTransportException> caught = new AtomicReference<>();
    PrivilegedExceptionAction<Void> openAction = () -> {
      try {
        getWrapped().open();
      } catch (TTransportException tte) {
        caught.set(tte);
      }
      return null;
    };
    try {
      ugi.doAs(openAction);
    } catch (InterruptedException e) {
      throw new IllegalStateException(e);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
    // Rethrow the transport failure, if any, so callers see the original exception type
    TTransportException failure = caught.get();
    if (failure != null) {
      throw failure;
    }
  }
}
| 9,907 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/ClientServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.thrift.ClientService.Client;
import org.apache.accumulo.core.lock.ServiceLockData.ThriftService;
import org.apache.accumulo.core.util.Pair;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ClientServiceThriftClient extends ThriftClientTypes<Client>
    implements TServerClient<Client> {
  private static final Logger LOG = LoggerFactory.getLogger(ClientServiceThriftClient.class);

  // Handed to getThriftServerConnection; judging by the name it suppresses repeated
  // "tservers down" warnings for this client instance (implementation lives in TServerClient)
  private final AtomicBoolean warnedAboutTServersBeingDown = new AtomicBoolean(false);

  ClientServiceThriftClient(String serviceName) {
    super(serviceName, new Client.Factory());
  }

  /**
   * Obtains a connection to a server hosting the CLIENT Thrift service, delegating to the
   * {@link TServerClient} implementation with this class' logger and warn-once flag.
   */
  @Override
  public Pair<String,Client> getThriftServerConnection(ClientContext context,
      boolean preferCachedConnections) throws TTransportException {
    return getThriftServerConnection(LOG, this, context, preferCachedConnections,
        warnedAboutTServersBeingDown, ThriftService.CLIENT);
  }

  // Delegates to the TServerClient overload, supplying this class' logger
  @Override
  public <R> R execute(ClientContext context, Exec<R,Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    return execute(LOG, context, exec);
  }

  // Delegates to the TServerClient overload, supplying this class' logger
  @Override
  public void executeVoid(ClientContext context, ExecVoid<Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    executeVoid(LOG, context, exec);
  }
}
| 9,908 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/TabletIngestClientServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.tabletingest.thrift.TabletIngestClientService.Client;
/**
 * Client side object that can be used to interact with services that support ingest operations
 * against tablets. See TabletIngestClientService$Iface for a list of supported operations.
 */
public class TabletIngestClientServiceThriftClient extends ThriftClientTypes<Client> {

  public TabletIngestClientServiceThriftClient(String serviceName) {
    // serviceName is the multiplexed Thrift service name; the factory builds the generated client
    super(serviceName, new Client.Factory());
  }
}
| 9,909 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/ManagerClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import static com.google.common.base.Preconditions.checkArgument;
import java.net.UnknownHostException;
import java.util.List;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import com.google.common.net.HostAndPort;
/**
 * Mixin for Thrift client types that talk to the active Manager.
 *
 * @param <C> the generated Thrift client type
 */
public interface ManagerClient<C extends TServiceClient> {

  /**
   * Opens a connection to the current active Manager.
   *
   * @param log logger to use for connection diagnostics
   * @param type the Thrift client type being connected
   * @param context client context supplying Manager locations and RPC configuration
   * @return a connected client, or {@code null} if no Manager is currently available (caller is
   *         expected to retry, e.g. via {@code getConnectionWithRetry})
   * @throws IllegalStateException if the Manager host cannot be resolved (not recoverable)
   */
  default C getManagerConnection(Logger log, ThriftClientTypes<C> type, ClientContext context) {
    checkArgument(context != null, "context is null");

    List<String> locations = context.getManagerLocations();

    if (locations.isEmpty()) {
      log.debug("No managers...");
      return null;
    }

    HostAndPort manager = HostAndPort.fromString(locations.get(0));
    // A port of 0 means the lock holder has not yet published a usable address
    if (manager.getPort() == 0) {
      return null;
    }

    try {
      // Manager requests can take a long time: don't ever time out
      return ThriftUtil.getClientNoTimeout(type, manager, context);
    } catch (TTransportException tte) {
      // instanceof is false for null, so no separate null check of the cause is needed
      if (tte.getCause() instanceof UnknownHostException) {
        // do not expect to recover from this
        throw new IllegalStateException(tte);
      }
      // parameterized logging avoids the string concatenation when debug is disabled
      log.debug("Failed to connect to manager={}, will retry... ", manager, tte);
      return null;
    }
  }
}
| 9,910 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/TabletServerThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.lock.ServiceLockData.ThriftService;
import org.apache.accumulo.core.tabletserver.thrift.TabletServerClientService.Client;
import org.apache.accumulo.core.util.Pair;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client side object that can be used to interact with services that support operations against
 * TabletServers. See TabletServerThriftClient$Iface for a list of supported operations.
 */
public class TabletServerThriftClient extends ThriftClientTypes<Client>
    implements TServerClient<Client> {

  private static final Logger LOG = LoggerFactory.getLogger(TabletServerThriftClient.class);

  // Latches the "no servers available" warning so it is logged once per outage rather than on
  // every retry; reset when a connection is successfully opened.
  private final AtomicBoolean warnedAboutTServersBeingDown = new AtomicBoolean(false);

  TabletServerThriftClient(String serviceName) {
    // serviceName is the multiplexed Thrift service name used when wrapping protocols
    super(serviceName, new Client.Factory());
  }

  @Override
  public Pair<String,Client> getThriftServerConnection(ClientContext context,
      boolean preferCachedConnections) throws TTransportException {
    // Delegate to the shared TServerClient lookup, requesting the TSERV Thrift service
    return getThriftServerConnection(LOG, this, context, preferCachedConnections,
        warnedAboutTServersBeingDown, ThriftService.TSERV);
  }

  @Override
  public <R> R execute(ClientContext context, Exec<R,Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    return execute(LOG, context, exec);
  }

  @Override
  public void executeVoid(ClientContext context, ExecVoid<Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    executeVoid(LOG, context, exec);
  }
}
| 9,911 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/CompactionCoordinatorServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.compaction.thrift.CompactionCoordinatorService.Client;
/**
 * Client side object used to interact with the compaction coordinator service. See
 * CompactionCoordinatorService$Iface for the supported operations.
 */
public class CompactionCoordinatorServiceThriftClient extends ThriftClientTypes<Client> {

  CompactionCoordinatorServiceThriftClient(String serviceName) {
    // serviceName is the multiplexed Thrift service name; the factory builds the generated client
    super(serviceName, new Client.Factory());
  }
}
| 9,912 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/GCMonitorServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.gc.thrift.GCMonitorService.Client;
/**
 * Client side object used to interact with the garbage collector's monitor service. See
 * GCMonitorService$Iface for the supported operations.
 */
public class GCMonitorServiceThriftClient extends ThriftClientTypes<Client> {

  GCMonitorServiceThriftClient(String serviceName) {
    // serviceName is the multiplexed Thrift service name; the factory builds the generated client
    super(serviceName, new Client.Factory());
  }
}
| 9,913 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/ManagerThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.util.ConcurrentModificationException;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.thrift.ThriftConcurrentModificationException;
import org.apache.accumulo.core.clientImpl.thrift.ThriftNotActiveServiceException;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.manager.thrift.ManagerClientService.Client;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client side object for invoking ManagerClientService RPCs, retrying on transient failures
 * (transport errors, inactive Manager) and translating Thrift exceptions into client API
 * exceptions.
 */
public class ManagerThriftClient extends ThriftClientTypes<Client>
    implements ManagerClient<Client> {

  private static final Logger LOG = LoggerFactory.getLogger(ManagerThriftClient.class);

  ManagerThriftClient(String serviceName) {
    super(serviceName, new Client.Factory());
  }

  @Override
  public Client getConnection(ClientContext context) {
    return getManagerConnection(LOG, this, context);
  }

  /**
   * Executes an operation against the active Manager, retrying until it succeeds or fails with a
   * non-transient error.
   *
   * @param context client context used to locate the Manager and close clients
   * @param exec the operation to run against the connected client
   * @return the operation's result
   * @throws AccumuloSecurityException if the server reports a security violation
   * @throws TableNotFoundException if the referenced table or namespace does not exist
   * @throws AccumuloException for any other failure
   */
  public <R> R executeTableCommand(ClientContext context, Exec<R,Client> exec)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Client client = null;
    while (true) {
      try {
        client = getConnectionWithRetry(context);
        return exec.execute(client);
      } catch (TTransportException tte) {
        // transient transport failure: back off briefly and retry
        LOG.debug("ManagerClient request failed, retrying ... ", tte);
        sleepUninterruptibly(100, MILLISECONDS);
      } catch (ThriftSecurityException e) {
        throw new AccumuloSecurityException(e.user, e.code, e);
      } catch (ThriftTableOperationException e) {
        switch (e.getType()) {
          case NAMESPACE_NOTFOUND:
            throw new TableNotFoundException(e.getTableName(), new NamespaceNotFoundException(e));
          case NOTFOUND:
            throw new TableNotFoundException(e);
          default:
            throw new AccumuloException(e);
        }
      } catch (ThriftNotActiveServiceException e) {
        // Let it loop, fetching a new location
        LOG.debug("Contacted a Manager which is no longer active, retrying");
        sleepUninterruptibly(100, MILLISECONDS);
      } catch (ThriftConcurrentModificationException e) {
        throw new ConcurrentModificationException(e.getMessage(), e);
      } catch (Exception e) {
        throw new AccumuloException(e);
      } finally {
        // each attempt gets a fresh connection; always release the one we used
        if (client != null) {
          ThriftUtil.close(client, context);
        }
      }
    }
  }

  @Override
  public <R> R execute(ClientContext context, Exec<R,Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    try {
      return executeTableCommand(context, exec);
    } catch (TableNotFoundException e) {
      // callers of execute() do not reference tables, so this should be impossible
      throw new AssertionError(e);
    }
  }

  /**
   * Void variant of {@link #executeTableCommand(ClientContext, Exec)}; delegates to it so the
   * retry and exception-translation logic is not duplicated.
   */
  public void executeVoidTableCommand(ClientContext context, ExecVoid<Client> exec)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    executeTableCommand(context, client -> {
      exec.execute(client);
      return null;
    });
  }

  @Override
  public void executeVoid(ClientContext context, ExecVoid<Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    try {
      executeVoidTableCommand(context, exec);
    } catch (TableNotFoundException e) {
      throw new AccumuloException(e);
    }
  }
}
| 9,914 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/TServerClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.clientImpl.AccumuloServerException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.ThriftTransportKey;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.lock.ServiceLockData.ThriftService;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes.Exec;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes.ExecVoid;
import org.apache.accumulo.core.util.Pair;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
/**
 * Mixin for Thrift client types whose service is hosted by tablet servers (and, for the generic
 * client service, also scan servers and compactors). Provides server discovery via ZooKeeper and
 * retry-on-transient-failure execution helpers.
 *
 * @param <C> the generated Thrift client type
 */
public interface TServerClient<C extends TServiceClient> {

  Pair<String,C> getThriftServerConnection(ClientContext context, boolean preferCachedConnections)
      throws TTransportException;

  /**
   * Discovers servers advertising {@code service} in ZooKeeper and opens a connection to one of
   * them.
   *
   * @param LOG logger for connection warnings
   * @param type the Thrift client type being connected
   * @param context client context supplying ZooKeeper paths, timeouts, and the transport pool
   * @param preferCachedConnections whether pooled transports may be reused
   * @param warned latch so the "no servers" warning is logged once per outage
   * @param service the Thrift service the server must advertise
   * @return the server address string paired with a connected client
   * @throws TTransportException if no server connection could be opened
   */
  default Pair<String,C> getThriftServerConnection(Logger LOG, ThriftClientTypes<C> type,
      ClientContext context, boolean preferCachedConnections, AtomicBoolean warned,
      ThriftService service) throws TTransportException {
    checkArgument(context != null, "context is null");
    long rpcTimeout = context.getClientTimeoutInMillis();
    // create list of servers
    ArrayList<ThriftTransportKey> servers = new ArrayList<>();

    // add tservers
    List<String> serverPaths = new ArrayList<>();
    serverPaths.add(context.getZooKeeperRoot() + Constants.ZTSERVERS);
    if (type == ThriftClientTypes.CLIENT) {
      // the generic client service is also hosted by compactors and scan servers; shuffle the
      // paths so load spreads across the server types
      serverPaths.add(context.getZooKeeperRoot() + Constants.ZCOMPACTORS);
      serverPaths.add(context.getZooKeeperRoot() + Constants.ZSSERVERS);
      Collections.shuffle(serverPaths, RANDOM.get());
    }

    ZooCache zc = context.getZooCache();
    for (String serverPath : serverPaths) {
      for (String server : zc.getChildren(serverPath)) {
        var zLocPath = ServiceLock.path(serverPath + "/" + server);
        // only consider servers holding their lock and advertising the requested service
        zc.getLockData(zLocPath).map(sld -> sld.getAddress(service))
            .map(address -> new ThriftTransportKey(address, rpcTimeout, context))
            .ifPresent(servers::add);
      }
    }

    boolean opened = false;
    try {
      Pair<String,TTransport> pair =
          context.getTransportPool().getAnyTransport(servers, preferCachedConnections);
      C client = ThriftUtil.createClient(type, pair.getSecond());
      opened = true;
      warned.set(false);
      return new Pair<>(pair.getFirst(), client);
    } finally {
      if (!opened) {
        // warn only on the first failure of an outage
        if (warned.compareAndSet(false, true)) {
          if (servers.isEmpty()) {
            LOG.warn("There are no tablet servers: check that zookeeper and accumulo are running.");
          } else {
            LOG.warn("Failed to find an available server in the list of servers: {}", servers);
          }
        }
      }
    }
  }

  /**
   * Runs {@code exec} against some live server, retrying on transport failures and translating
   * Thrift exceptions into client API exceptions.
   */
  default <R> R execute(Logger LOG, ClientContext context, Exec<R,C> exec)
      throws AccumuloException, AccumuloSecurityException {
    while (true) {
      String server = null;
      C client = null;
      try {
        Pair<String,C> pair = getThriftServerConnection(context, true);
        server = pair.getFirst();
        client = pair.getSecond();
        return exec.execute(client);
      } catch (ThriftSecurityException e) {
        throw new AccumuloSecurityException(e.user, e.code, e);
      } catch (TApplicationException tae) {
        throw new AccumuloServerException(server, tae);
      } catch (TTransportException tte) {
        // parameterized logging avoids the string concatenation when debug is disabled
        LOG.debug("ClientService request failed {}, retrying ... ", server, tte);
        sleepUninterruptibly(100, MILLISECONDS);
      } catch (TException e) {
        throw new AccumuloException(e);
      } finally {
        // each attempt gets a fresh connection; always release the one we used
        if (client != null) {
          ThriftUtil.close(client, context);
        }
      }
    }
  }

  /**
   * Void variant of {@link #execute(Logger, ClientContext, Exec)}; delegates to it so the retry
   * and exception-translation logic is not duplicated.
   */
  default void executeVoid(Logger LOG, ClientContext context, ExecVoid<C> exec)
      throws AccumuloException, AccumuloSecurityException {
    execute(LOG, context, client -> {
      exec.execute(client);
      return null;
    });
  }
}
| 9,915 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/TabletScanClientServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.tabletscan.thrift.TabletScanClientService.Client;
/**
 * Client side object that can be used to interact with services that support scan operations
 * against tablets. See TabletScanClientService$Iface for a list of supported operations.
 */
public class TabletScanClientServiceThriftClient extends ThriftClientTypes<Client> {

  TabletScanClientServiceThriftClient(String serviceName) {
    // serviceName is the multiplexed Thrift service name; the factory builds the generated client
    super(serviceName, new Client.Factory());
  }
}
| 9,916 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/TabletManagementClientServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.tablet.thrift.TabletManagementClientService.Client;
/**
 * Client side object that can be used to interact with services that support management operations
 * against tablets. See TabletManagementClientService$Iface for a list of supported operations.
 */
public class TabletManagementClientServiceThriftClient extends ThriftClientTypes<Client> {

  public TabletManagementClientServiceThriftClient(String serviceName) {
    // serviceName is the multiplexed Thrift service name; the factory builds the generated client
    super(serviceName, new Client.Factory());
  }
}
| 9,917 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.thrift.TException;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.TServiceClientFactory;
import org.apache.thrift.protocol.TMultiplexedProtocol;
import org.apache.thrift.protocol.TProtocol;
/**
 * Base type for all client-side Thrift service wrappers. Holds the multiplexed service name and
 * the generated client factory, and declares the canonical singleton instance for each service.
 * Subclasses override the connection/execution hooks that apply to their service.
 *
 * @param <C> the generated Thrift client type
 */
public abstract class ThriftClientTypes<C extends TServiceClient> {

  public static final ClientServiceThriftClient CLIENT = new ClientServiceThriftClient("client");

  public static final CompactorServiceThriftClient COMPACTOR =
      new CompactorServiceThriftClient("compactor");

  public static final CompactionCoordinatorServiceThriftClient COORDINATOR =
      new CompactionCoordinatorServiceThriftClient("coordinator");

  public static final FateThriftClient FATE = new FateThriftClient("fate");

  public static final GCMonitorServiceThriftClient GC = new GCMonitorServiceThriftClient("gc");

  public static final ManagerThriftClient MANAGER = new ManagerThriftClient("mgr");

  public static final TabletServerThriftClient TABLET_SERVER =
      new TabletServerThriftClient("tserver");

  public static final TabletScanClientServiceThriftClient TABLET_SCAN =
      new TabletScanClientServiceThriftClient("scan");

  public static final TabletIngestClientServiceThriftClient TABLET_INGEST =
      new TabletIngestClientServiceThriftClient("ingest");

  public static final TabletManagementClientServiceThriftClient TABLET_MGMT =
      new TabletManagementClientServiceThriftClient("tablet");

  /**
   * An operation run against a connected client that produces a result.
   *
   * @param <R> return type
   * @param <C> client type
   */
  public interface Exec<R,C> {
    R execute(C client) throws TException;
  }

  /**
   * An operation run against a connected client that produces no result.
   *
   * @param <C> client type
   */
  public interface ExecVoid<C> {
    void execute(C client) throws TException;
  }

  private final String serviceName;

  private final TServiceClientFactory<C> clientFactory;

  public ThriftClientTypes(String serviceName, TServiceClientFactory<C> factory) {
    this.serviceName = serviceName;
    this.clientFactory = factory;
  }

  public final String getServiceName() {
    return serviceName;
  }

  public final TServiceClientFactory<C> getClientFactory() {
    return clientFactory;
  }

  /** Builds a client over {@code prot}, wrapped for server-side service multiplexing. */
  public C getClient(TProtocol prot) {
    // All server side TProcessors are multiplexed, so tag the protocol with our service name
    TMultiplexedProtocol multiplexed = new TMultiplexedProtocol(prot, serviceName);
    return clientFactory.getClient(multiplexed);
  }

  /** Hook for subclasses that know how to locate their server; base type has no default. */
  public C getConnection(ClientContext context) {
    throw new UnsupportedOperationException("This method has not been implemented");
  }

  /** Repeatedly attempts {@link #getConnection(ClientContext)}, pausing between failures. */
  public C getConnectionWithRetry(ClientContext context) {
    C connection = getConnection(context);
    while (connection == null) {
      sleepUninterruptibly(250, MILLISECONDS);
      connection = getConnection(context);
    }
    return connection;
  }

  /** Hook for subclasses that support executing value-returning operations. */
  public <R> R execute(ClientContext context, Exec<R,C> exec)
      throws AccumuloException, AccumuloSecurityException {
    throw new UnsupportedOperationException("This method has not been implemented");
  }

  /** Hook for subclasses that support executing void operations. */
  public void executeVoid(ClientContext context, ExecVoid<C> exec)
      throws AccumuloException, AccumuloSecurityException {
    throw new UnsupportedOperationException("This method has not been implemented");
  }
}
| 9,918 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/FateThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.manager.thrift.FateService.Client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client side object for invoking FATE operations on the active Manager.
 */
public class FateThriftClient extends ThriftClientTypes<Client> implements ManagerClient<Client> {

  // final added: the logger is a constant; this also matches the convention used by every other
  // client type in this package
  private static final Logger LOG = LoggerFactory.getLogger(FateThriftClient.class);

  FateThriftClient(String serviceName) {
    super(serviceName, new Client.Factory());
  }

  @Override
  public Client getConnection(ClientContext context) {
    return getManagerConnection(LOG, this, context);
  }
}
| 9,919 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/clients/CompactorServiceThriftClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc.clients;
import org.apache.accumulo.core.compaction.thrift.CompactorService.Client;
/**
 * Thrift client type for the {@code CompactorService}: supplies the service name and the Thrift
 * {@code Client.Factory} to the {@link ThriftClientTypes} base class.
 */
public class CompactorServiceThriftClient extends ThriftClientTypes<Client> {
// Package-private: presumably constructed only by the client-type registry in this package —
// TODO confirm; the call site is outside this view.
CompactorServiceThriftClient(String serviceName) {
super(serviceName, new Client.Factory());
}
}
| 9,920 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/ConstraintViolationSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.io.Serializable;
import org.apache.accumulo.core.dataImpl.thrift.TConstraintViolationSummary;
/**
* A summary of constraint violations across some number of mutations.
*/
/**
 * A summary of constraint violations across some number of mutations.
 */
public class ConstraintViolationSummary implements Serializable {

  private static final long serialVersionUID = 1L;

  // Fields are public for historical/serialization reasons; prefer the getters.
  public String constrainClass;
  public short violationCode;
  public String violationDescription;
  public long numberOfViolatingMutations;

  /**
   * Creates a new summary.
   *
   * @param constrainClass class of constraint that was violated
   * @param violationCode violation code
   * @param violationDescription description of violation
   * @param numberOfViolatingMutations number of mutations that produced this particular violation
   */
  public ConstraintViolationSummary(String constrainClass, short violationCode,
      String violationDescription, long numberOfViolatingMutations) {
    this.constrainClass = constrainClass;
    this.violationCode = violationCode;
    this.violationDescription = violationDescription;
    this.numberOfViolatingMutations = numberOfViolatingMutations;
  }

  /**
   * Creates a new summary from its Thrift representation.
   *
   * @param tcvs Thrift summary
   */
  public ConstraintViolationSummary(TConstraintViolationSummary tcvs) {
    this(tcvs.constrainClass, tcvs.violationCode, tcvs.violationDescription,
        tcvs.numberOfViolatingMutations);
  }

  /** @return class name of the violated constraint */
  public String getConstrainClass() {
    return constrainClass;
  }

  /** @return numeric violation code */
  public short getViolationCode() {
    return violationCode;
  }

  /** @return human-readable description of the violation */
  public String getViolationDescription() {
    return violationDescription;
  }

  /** @return number of mutations that produced this particular violation */
  public long getNumberOfViolatingMutations() {
    return numberOfViolatingMutations;
  }

  @Override
  public String toString() {
    // Plain concatenation renders the same text as the previous String.format("%s"/"%d") form.
    return "ConstraintViolationSummary(constrainClass:" + constrainClass + ", violationCode:"
        + violationCode + ", violationDescription:" + violationDescription
        + ", numberOfViolatingMutations:" + numberOfViolatingMutations + ")";
  }

  /**
   * Converts this summary to its Thrift representation.
   *
   * @return Thrift summary
   */
  public TConstraintViolationSummary toThrift() {
    return new TConstraintViolationSummary(constrainClass, violationCode, violationDescription,
        numberOfViolatingMutations);
  }
}
| 9,921 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/Mutation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.accumulo.core.dataImpl.thrift.TMutation;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.util.ByteBufferUtil;
import org.apache.accumulo.core.util.UnsynchronizedBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Mutation represents an action that manipulates a row in a table. A mutation holds a list of
* column/value pairs that represent an atomic set of modifications to make to a row.
*
* <p>
* Convenience methods which takes columns and value as CharSequence (String implements
* CharSequence) are provided. CharSequence is converted to UTF-8 by constructing a new Text object.
*
* <p>
* When always passing in the same data as a CharSequence/String, it's probably more efficient to
* call the Text put methods. This way the data is only encoded once and only one Text object is
* created.
*
* <p>
* All of the put methods append data to the mutation; they do not overwrite anything that was
* previously put. The mutation holds a list of all columns/values that were put into it.
*
* <p>
* The putDelete() methods do not remove something that was previously added to the mutation;
* rather, they indicate that Accumulo should insert a delete marker for that row column. A delete
* marker effectively hides entries for that row column with a timestamp earlier than the marker's.
* (The hidden data is eventually removed during Accumulo garbage collection.)
*
* <p>
* This class has many overloaded {@code put} and {@code putDelete} methods. These were added to
* support different subset of fields and types. The functionality of all of these {@code put}
* methods and more is provided by the new fluent {@link #at()} method added in 2.0.
*/
public class Mutation implements Writable {
/**
 * Internally, this class keeps most mutation data in a byte buffer. If a cell value put into a
 * mutation exceeds this size, then it is stored in a separate buffer, and a reference to it is
 * inserted into the main buffer.
 */
static final int VALUE_SIZE_COPY_CUTOFF = 1 << 15;
/**
 * Maximum size of a mutation (2GB).
 */
static final long MAX_MUTATION_SIZE = (1L << 31);
// Estimated per-field bookkeeping bytes (length prefix, etc.) used by the size check in put().
static final long SERIALIZATION_OVERHEAD = 5;
/**
 * Formats available for serializing Mutations. The formats are described in a
 * <a href="doc-files/mutation-serialization.html">separate document</a>.
 */
public enum SERIALIZED_FORMAT {
VERSION1, VERSION2
}
// Presumably selects the legacy (VERSION1) deserialization path — TODO confirm; the
// readFields logic is outside this view.
private boolean useOldDeserialize = false;
// Row ID bytes; defensively copied by the constructors.
private byte[] row;
// Serialized column updates; populated from the write buffer by serialize().
private byte[] data;
// Number of column updates put into this mutation so far.
private int entries;
// Out-of-band copies of large values (>= VALUE_SIZE_COPY_CUTOFF); the main buffer stores a
// negative 1-based index into this list instead of the value bytes.
private List<byte[]> values;
// tracks estimated size of row.length + largeValues.length
@VisibleForTesting
long estRowAndLargeValSize = 0;
// Write-side buffer for column updates; null once serialize() runs, after which puts fail.
private UnsynchronizedBuffer.Writer buffer;
// Cached parsed column updates — population not visible in this chunk; presumably filled
// lazily by an accessor — TODO confirm.
private List<ColumnUpdate> updates;
private static final byte[] EMPTY_BYTES = new byte[0];
/**
 * Freezes this mutation: collapses the write buffer into the {@code data} byte array and
 * discards the buffer. Idempotent — once the buffer is null, subsequent calls do nothing
 * (and the private put methods reject further updates).
 */
private void serialize() {
  if (buffer == null) {
    return; // already serialized
  }
  data = buffer.toArray();
  buffer = null;
}
/**
 * Returns the serialized bytes without mutating this object, so hashCode and equals can run on
 * a not-yet-serialized mutation. While the write buffer is still live, a copy of its current
 * contents is returned; once serialized, the stable {@code data} array is wrapped directly
 * (it will not change after serialization).
 */
private ByteBuffer serializedSnapshot() {
  return buffer == null ? ByteBuffer.wrap(data) : buffer.toByteBuffer();
}
/**
* Creates a new mutation. A defensive copy is made.
*
* @param row row ID
* @since 1.5.0
*/
public Mutation(byte[] row) {
this(row, 0, row.length);
}
/**
* Creates a new mutation. A defensive copy is made.
*
* @param row row ID
* @param initialBufferSize the initial size, in bytes, of the internal buffer for serializing
* @since 1.7.0
*/
public Mutation(byte[] row, int initialBufferSize) {
this(row, 0, row.length, initialBufferSize);
}
/**
* Creates a new mutation. A defensive copy is made.
*
* @param row byte array containing row ID
* @param start starting index of row ID in byte array
* @param length length of row ID in byte array
* @throws IndexOutOfBoundsException if start or length is invalid
* @since 1.5.0
*/
public Mutation(byte[] row, int start, int length) {
this(row, start, length, 64);
}
/**
* Creates a new mutation. A defensive copy is made.
*
* @param row byte array containing row ID
* @param start starting index of row ID in byte array
* @param length length of row ID in byte array
* @param initialBufferSize the initial size, in bytes, of the internal buffer for serializing
* @throws IndexOutOfBoundsException if start or length is invalid
* @since 1.7.0
*/
public Mutation(byte[] row, int start, int length, int initialBufferSize) {
this.row = new byte[length];
System.arraycopy(row, start, this.row, 0, length);
buffer = new UnsynchronizedBuffer.Writer(initialBufferSize);
estRowAndLargeValSize = length + SERIALIZATION_OVERHEAD;
}
/**
* Creates a new mutation. A defensive copy is made.
*
* @param row row ID
*/
public Mutation(Text row) {
this(row.getBytes(), 0, row.getLength());
}
/**
* Creates a new mutation. A defensive copy is made.
*
* @param row row ID
* @param initialBufferSize the initial size, in bytes, of the internal buffer for serializing
* @since 1.7.0
*/
public Mutation(Text row, int initialBufferSize) {
this(row.getBytes(), 0, row.getLength(), initialBufferSize);
}
/**
* Creates a new mutation.
*
* @param row row ID
*/
public Mutation(CharSequence row) {
this(new Text(row.toString()));
}
/**
* Creates a new mutation.
*
* @param row row ID
* @param initialBufferSize the initial size, in bytes, of the internal buffer for serializing
* @since 1.7.0
*/
public Mutation(CharSequence row, int initialBufferSize) {
this(new Text(row.toString()), initialBufferSize);
}
/**
* Creates a new mutation.
*/
public Mutation() {}
/**
* Creates a new mutation from a Thrift mutation.
*
* @param tmutation Thrift mutation
*/
public Mutation(TMutation tmutation) {
this.row = ByteBufferUtil.toBytes(tmutation.row);
this.data = ByteBufferUtil.toBytes(tmutation.data);
this.entries = tmutation.entries;
this.values = ByteBufferUtil.toBytesList(tmutation.values);
if (this.row == null) {
throw new IllegalArgumentException("null row");
}
if (this.data == null) {
throw new IllegalArgumentException("null serialized data");
}
}
/**
 * Creates a new mutation by copying another. The source mutation is serialized first (freezing
 * it against further puts), and the copy then shares the source's row, data, and large-value
 * arrays rather than duplicating them — safe because those arrays are not written again after
 * serialization.
 *
 * @param m mutation to copy
 */
public Mutation(Mutation m) {
m.serialize();
this.row = m.row;
this.data = m.data;
this.entries = m.entries;
this.values = m.values;
}
/**
* Gets the row ID for this mutation. Not a defensive copy.
*
* @return row ID
*/
public byte[] getRow() {
return row;
}
// Appends a length-prefixed byte array (all of b) to the write buffer.
private void fill(byte[] b) {
fill(b, b.length);
}
// Appends the first 'length' bytes of b to the write buffer, preceded by a vlong length prefix.
private void fill(byte[] b, int length) {
buffer.writeVLong(length);
buffer.add(b, 0, length);
}
// Appends a single boolean flag to the write buffer.
private void fill(boolean b) {
buffer.add(b);
}
// Appends an int in variable-length long encoding.
private void fill(int i) {
buffer.writeVLong(i);
}
// Appends a long in variable-length encoding.
private void fill(long l) {
buffer.writeVLong(l);
}
// Delegates to the core encoder using the full length of each byte array.
private void put(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted,
byte[] val) {
put(cf, cf.length, cq, cq.length, cv, hasts, ts, deleted, val, val.length);
}
/*
* When dealing with Text object the length must be gotten from the object, not from the byte
* array.
*/
private void put(Text cf, Text cq, byte[] cv, boolean hasts, long ts, boolean deleted,
byte[] val) {
put(cf.getBytes(), cf.getLength(), cq.getBytes(), cq.getLength(), cv, hasts, ts, deleted, val,
val.length);
}
// Core encoder: appends one column update (family, qualifier, visibility, optional timestamp,
// delete flag, value) to the write buffer. Field order here defines the wire format.
private void put(byte[] cf, int cfLength, byte[] cq, int cqLength, byte[] cv, boolean hasts,
long ts, boolean deleted, byte[] val, int valLength) {
// A null buffer means serialize() has already run; the mutation is frozen.
if (buffer == null) {
throw new IllegalStateException("Can not add to mutation after serializing it");
}
// Estimate post-put size up front: field bytes, 8 for an optional timestamp, 2 for the two
// boolean flags, and bookkeeping overhead for the four length-prefixed fields.
long estimatedSizeAfterPut = estRowAndLargeValSize + buffer.size() + cfLength + cqLength
+ cv.length + (hasts ? 8 : 0) + valLength + 2 + 4 * SERIALIZATION_OVERHEAD;
// The >= 0 clause also rejects arithmetic overflow of the estimate.
Preconditions.checkArgument(
estimatedSizeAfterPut < MAX_MUTATION_SIZE && estimatedSizeAfterPut >= 0,
"Maximum mutation size must be less than 2GB ");
fill(cf, cfLength);
fill(cq, cqLength);
fill(cv);
fill(hasts);
if (hasts) {
fill(ts);
}
fill(deleted);
if (valLength < VALUE_SIZE_COPY_CUTOFF) {
// Small value: inline it in the main buffer.
fill(val, valLength);
} else {
// Large value: copy into the side list and store a negative 1-based index in the main
// buffer so deserialization can locate it.
if (values == null) {
values = new ArrayList<>();
}
byte[] copy = new byte[valLength];
System.arraycopy(val, 0, copy, 0, valLength);
values.add(copy);
fill(-1 * values.size());
estRowAndLargeValSize += valLength + SERIALIZATION_OVERHEAD;
}
entries++;
}
// CharSequence variants convert to Text (UTF-8) before delegating.
private void put(CharSequence cf, CharSequence cq, byte[] cv, boolean hasts, long ts,
boolean deleted, byte[] val) {
put(new Text(cf.toString()), new Text(cq.toString()), cv, hasts, ts, deleted, val);
}
// Text-value variant; lengths must come from the Text objects, not the backing arrays.
private void put(Text cf, Text cq, byte[] cv, boolean hasts, long ts, boolean deleted, Text val) {
put(cf.getBytes(), cf.getLength(), cq.getBytes(), cq.getLength(), cv, hasts, ts, deleted,
val.getBytes(), val.getLength());
}
private void put(CharSequence cf, CharSequence cq, byte[] cv, boolean hasts, long ts,
boolean deleted, CharSequence val) {
put(new Text(cf.toString()), new Text(cq.toString()), cv, hasts, ts, deleted,
new Text(val.toString()));
}
/**
* Puts a modification in this mutation. Column visibility is empty; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param value cell value
* @see #at()
*/
public void put(Text columnFamily, Text columnQualifier, Value value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, false, value.get());
}
/**
* Puts a modification in this mutation. Timestamp is not set. All parameters are defensively
* copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param value cell value
* @see #at()
*/
public void put(Text columnFamily, Text columnQualifier, ColumnVisibility columnVisibility,
Value value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, false,
value.get());
}
/**
* Puts a modification in this mutation. Column visibility is empty. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @param value cell value
* @see #at()
*/
public void put(Text columnFamily, Text columnQualifier, long timestamp, Value value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, false, value.get());
}
/**
* Puts a modification in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @param value cell value
* @see #at()
*/
public void put(Text columnFamily, Text columnQualifier, ColumnVisibility columnVisibility,
long timestamp, Value value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, false,
value.get());
}
/**
* Puts a deletion in this mutation. Matches empty column visibility; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @see #at()
*/
public void putDelete(Text columnFamily, Text columnQualifier) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, true, EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. Timestamp is not set. All parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @see #at()
*/
public void putDelete(Text columnFamily, Text columnQualifier,
ColumnVisibility columnVisibility) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, true,
EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. Matches empty column visibility. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @see #at()
*/
public void putDelete(Text columnFamily, Text columnQualifier, long timestamp) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, true, EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @see #at()
*/
public void putDelete(Text columnFamily, Text columnQualifier, ColumnVisibility columnVisibility,
long timestamp) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, true,
EMPTY_BYTES);
}
/**
* Puts a modification in this mutation. Column visibility is empty; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier, Value value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, false, value.get());
}
/**
* Puts a modification in this mutation. Timestamp is not set. All parameters are defensively
* copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier,
ColumnVisibility columnVisibility, Value value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, false,
value.get());
}
/**
* Puts a modification in this mutation. Column visibility is empty. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier, long timestamp,
Value value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, false, value.get());
}
/**
* Puts a modification in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier,
ColumnVisibility columnVisibility, long timestamp, Value value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, false,
value.get());
}
/**
* Puts a deletion in this mutation. Matches empty column visibility; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @see #at()
*/
public void putDelete(CharSequence columnFamily, CharSequence columnQualifier) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, true, EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. Timestamp is not set. All appropriate parameters are
* defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @see #at()
*/
public void putDelete(CharSequence columnFamily, CharSequence columnQualifier,
ColumnVisibility columnVisibility) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, true,
EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. Matches empty column visibility. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @see #at()
*/
public void putDelete(CharSequence columnFamily, CharSequence columnQualifier, long timestamp) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, true, EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @see #at()
*/
public void putDelete(CharSequence columnFamily, CharSequence columnQualifier,
ColumnVisibility columnVisibility, long timestamp) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, true,
EMPTY_BYTES);
}
/**
* Puts a modification in this mutation. Column visibility is empty; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier, CharSequence value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, false, value);
}
/**
* Puts a modification in this mutation. Timestamp is not set. All parameters are defensively
* copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier,
ColumnVisibility columnVisibility, CharSequence value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, false, value);
}
/**
* Puts a modification in this mutation. Column visibility is empty. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier, long timestamp,
CharSequence value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, false, value);
}
/**
* Puts a modification in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @param value cell value
* @see #at()
*/
public void put(CharSequence columnFamily, CharSequence columnQualifier,
ColumnVisibility columnVisibility, long timestamp, CharSequence value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, false,
value);
}
/**
* Puts a modification in this mutation. Column visibility is empty; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param value cell value
* @since 1.5.0
* @see #at()
*/
public void put(byte[] columnFamily, byte[] columnQualifier, byte[] value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, false, value);
}
/**
* Puts a modification in this mutation. Timestamp is not set. All parameters are defensively
* copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param value cell value
* @since 1.5.0
* @see #at()
*/
public void put(byte[] columnFamily, byte[] columnQualifier, ColumnVisibility columnVisibility,
byte[] value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, false, value);
}
/**
* Puts a modification in this mutation. Column visibility is empty. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @param value cell value
* @since 1.5.0
* @see #at()
*/
public void put(byte[] columnFamily, byte[] columnQualifier, long timestamp, byte[] value) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, false, value);
}
/**
* Puts a modification in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @param value cell value
* @since 1.5.0
* @see #at()
*/
public void put(byte[] columnFamily, byte[] columnQualifier, ColumnVisibility columnVisibility,
long timestamp, byte[] value) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, false,
value);
}
/**
* Puts a deletion in this mutation. Matches empty column visibility; timestamp is not set. All
* parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @since 1.5.0
* @see #at()
*/
public void putDelete(byte[] columnFamily, byte[] columnQualifier) {
put(columnFamily, columnQualifier, EMPTY_BYTES, false, 0L, true, EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. Timestamp is not set. All parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @since 1.5.0
* @see #at()
*/
public void putDelete(byte[] columnFamily, byte[] columnQualifier,
ColumnVisibility columnVisibility) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), false, 0L, true,
EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. Matches empty column visibility. All appropriate parameters
* are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param timestamp timestamp
* @since 1.5.0
* @see #at()
*/
public void putDelete(byte[] columnFamily, byte[] columnQualifier, long timestamp) {
put(columnFamily, columnQualifier, EMPTY_BYTES, true, timestamp, true, EMPTY_BYTES);
}
/**
* Puts a deletion in this mutation. All appropriate parameters are defensively copied.
*
* @param columnFamily column family
* @param columnQualifier column qualifier
* @param columnVisibility column visibility
* @param timestamp timestamp
* @since 1.5.0
* @see #at()
*/
public void putDelete(byte[] columnFamily, byte[] columnQualifier,
ColumnVisibility columnVisibility, long timestamp) {
put(columnFamily, columnQualifier, columnVisibility.getExpression(), true, timestamp, true,
EMPTY_BYTES);
}
/**
* Provides methods for setting the column family of a Mutation. The user can provide the family
* name as a byte array, CharSequence, ByteBuffer, or Text object instance and the backend will do
* the necessary transformation.
*
* All FamilyOptions methods return an instance derived from the QualifierOptions interface,
* allowing the methods to be semantically chained.
*
* @since 2.0.0
*/
public interface FamilyOptions extends QualifierOptions {
QualifierOptions family(byte[] colFam);
QualifierOptions family(ByteBuffer colFam);
QualifierOptions family(CharSequence colFam);
QualifierOptions family(Text colFam);
}
/**
* Provides methods for setting the column qualifier of a Mutation. The user can provide the
* qualifier name as a byte array, CharSequence, ByteBuffer, or Text object instance and the
* backend will do the necessary transformation.
*
* All QualifierOptions methods return an instance derived from the VisibilityOptions interface,
* allowing the methods to be semantically chained.
*
* @since 2.0.0
*/
public interface QualifierOptions extends VisibilityOptions {
VisibilityOptions qualifier(byte[] colQual);
VisibilityOptions qualifier(ByteBuffer colQual);
VisibilityOptions qualifier(CharSequence colQual);
VisibilityOptions qualifier(Text colQual);
}
/**
* Provides methods for setting the column visibility of a Mutation. The user can provide the
* visibility as a byte array or {@link org.apache.accumulo.core.security.ColumnVisibility} object
* instance and the backend will do the necessary transformation.
*
* All QualifierOptions methods return an instance derived from the VisibilityOptions interface,
* allowing the methods to be semantically chained.
*
* @since 2.0.0
*/
public interface VisibilityOptions extends TimestampOptions {
TimestampOptions visibility(byte[] colVis);
TimestampOptions visibility(ByteBuffer colVis);
TimestampOptions visibility(CharSequence colVis);
TimestampOptions visibility(ColumnVisibility colVis);
TimestampOptions visibility(Text colVis);
}
/**
* Provides methods for setting the timestamp of a Mutation. The user must provide the timestamp
* as a long.
*
* <p>
* All TimestampOptions methods return an instance derived from the MutationOptions interface,
* allowing the methods to be semantically chained.
*
* @since 2.0.0
*/
public interface TimestampOptions extends MutationOptions {
MutationOptions timestamp(long ts);
}
/**
* Provides methods for setting the value of a Mutation. The user can provide the value as a byte
* array, Value, or ByteBuffer object instance and the backend will do the necessary
* transformation.
*
* <p>
* All MutationOptions methods complete a fluent Mutation API method chain.
*
* @since 2.0.0
*/
public interface MutationOptions {
Mutation put(byte[] val);
Mutation put(ByteBuffer val);
Mutation put(CharSequence val);
Mutation put(Text val);
Mutation put(Value val);
Mutation delete();
}
/**
* Fluent API for putting or deleting to a Mutation that makes it easy use different types (i.e
* byte[], CharSequence, etc) when specifying the family, qualifier, value, etc.
*
* <p>
* Methods are optional but must follow this order: family, qualifier, visibility, timestamp.
*
* <p>
* The put and delete methods end the chain and add the modification to the Mutation.
*
* <p>
* The following is an example if using {@code at()} to put and delete. Notice how the example
* mixes {@code String} and {@code byte[]}.
*
* <pre>
* <code>
* Mutation m = new Mutation("row0017");
* m.at().family("001").qualifier(new byte[] {0,1}).put("v99");
* m.at().family("002").qualifier(new byte[] {0,1}).delete();
* </code>
* </pre>
*
* @return a new FamilyOptions object, starting the method chain
* @since 2.0.0
*/
public FamilyOptions at() {
return new Options();
}
// private inner class implementing all Options interfaces
  /**
   * Private implementation of the fluent options chain started by {@link #at()}. A single Options
   * instance implements every stage interface of the chain; it records the column
   * family/qualifier/visibility/timestamp as the chain advances and writes them into the enclosing
   * Mutation's buffer only when the chain is finalized by a put or delete.
   */
  private class Options implements FamilyOptions {
    // column pieces gathered while the chain advances; serialized only on put()/delete()
    byte[] columnFamily;
    int columnFamilyLength;
    byte[] columnQualifier;
    int columnQualifierLength;
    byte[] columnVisibility = null; // null means "not set"; serialized as EMPTY_BYTES
    int columnVisibilityLength;
    boolean hasTs = false; // true only when an explicit timestamp was supplied
    long timestamp;
    private Options() {}
    // methods for changing the column family of a Mutation
    /**
     * Sets the column family of a mutation.
     *
     * @param colFam column family
     * @param colFamLength column family length
     * @return a QualifierOptions object, advancing the method chain
     */
    private QualifierOptions family(byte[] colFam, int colFamLength) {
      columnFamily = colFam;
      columnFamilyLength = colFamLength;
      return this;
    }
    /**
     * Sets the column family of a mutation.
     *
     * @param colFam column family
     * @return a QualifierOptions object, advancing the method chain
     */
    @Override
    public QualifierOptions family(byte[] colFam) {
      return family(colFam, colFam.length);
    }
    /**
     * Sets the column family of a mutation.
     *
     * @param colFam column family
     * @return a QualifierOptions object, advancing the method chain
     */
    @Override
    public QualifierOptions family(ByteBuffer colFam) {
      return family(ByteBufferUtil.toBytes(colFam));
    }
    /**
     * Sets the column family of a mutation.
     *
     * @param colFam column family
     * @return a QualifierOptions object, advancing the method chain
     */
    @Override
    public QualifierOptions family(CharSequence colFam) {
      return family(new Text(colFam.toString()));
    }
    /**
     * Sets the column family of a mutation.
     *
     * @param colFam column family
     * @return a QualifierOptions object, advancing the method chain
     */
    @Override
    public QualifierOptions family(Text colFam) {
      return family(colFam.getBytes(), colFam.getLength());
    }
    /**
     * Sets the column qualifier of a mutation.
     *
     * @param colQual column qualifier
     * @param colQualLength column qualifier
     * @return a VisibilityOptions object, advancing the method chain
     */
    private VisibilityOptions qualifier(byte[] colQual, int colQualLength) {
      columnQualifier = colQual;
      columnQualifierLength = colQualLength;
      return this;
    }
    /**
     * Sets the column qualifier of a mutation.
     *
     * @param colQual column qualifier
     * @return a VisibilityOptions object, advancing the method chain
     */
    @Override
    public VisibilityOptions qualifier(byte[] colQual) {
      return qualifier(colQual, colQual.length);
    }
    /**
     * Sets the column qualifier of a mutation.
     *
     * @param colQual column qualifier
     * @return a VisibilityOptions object, advancing the method chain
     */
    @Override
    public VisibilityOptions qualifier(ByteBuffer colQual) {
      return qualifier(ByteBufferUtil.toBytes(colQual));
    }
    /**
     * Sets the column qualifier of a mutation.
     *
     * @param colQual column qualifier
     * @return a VisibilityOptions object, advancing the method chain
     */
    @Override
    public VisibilityOptions qualifier(CharSequence colQual) {
      return qualifier(new Text(colQual.toString()));
    }
    /**
     * Sets the column qualifier of a mutation.
     *
     * @param colQual column qualifier
     * @return a VisibilityOptions object, advancing the method chain
     */
    @Override
    public VisibilityOptions qualifier(Text colQual) {
      return qualifier(colQual.getBytes(), colQual.getLength());
    }
    /**
     * Sets the column visibility of a mutation.
     *
     * @param colVis column visibility
     * @param colVisLen column visibility length
     * @return a TimestampOptions object, advancing the method chain
     */
    private TimestampOptions visibility(byte[] colVis, int colVisLen) {
      columnVisibility = colVis;
      columnVisibilityLength = colVisLen;
      return this;
    }
    /**
     * Sets the column visibility of a mutation.
     *
     * @param colVis column visibility
     * @return a TimestampOptions object, advancing the method chain
     */
    @Override
    public TimestampOptions visibility(byte[] colVis) {
      return visibility(colVis, colVis.length);
    }
    /**
     * Sets the column visibility of a mutation.
     *
     * @param colVis column visibility
     * @return a TimestampOptions object, advancing the method chain
     */
    @Override
    public TimestampOptions visibility(ByteBuffer colVis) {
      return visibility(ByteBufferUtil.toBytes(colVis));
    }
    /**
     * Sets the column visibility of a mutation.
     *
     * @param colVis column visibility
     * @return a TimestampOptions object, advancing the method chain
     */
    @Override
    public TimestampOptions visibility(CharSequence colVis) {
      return visibility(new Text(colVis.toString()));
    }
    /**
     * Sets the column visibility of a mutation.
     *
     * @param colVis column visibility
     * @return a TimestampOptions object, advancing the method chain
     */
    @Override
    public TimestampOptions visibility(ColumnVisibility colVis) {
      return visibility(colVis.getExpression());
    }
    /**
     * Sets the column visibility of a mutation.
     *
     * @param colVis column visibility
     * @return a TimestampOptions object, advancing the method chain
     */
    @Override
    public TimestampOptions visibility(Text colVis) {
      return visibility(colVis.copyBytes());
    }
    /**
     * Sets the timestamp of a mutation.
     *
     * @param ts timestamp
     * @return a MutationOptions object, advancing the method chain
     */
    @Override
    public MutationOptions timestamp(long ts) {
      hasTs = true;
      timestamp = ts;
      return this;
    }
    /**
     * Finalizes the method chain by filling the buffer with the gathered Mutation configuration.
     * The field order written here (family, qualifier, visibility, hasTimestamp[, timestamp],
     * deleted, value) must match the order read by deserializeColumnUpdate().
     *
     * @param val value
     * @param delete deletion flag
     */
    private Mutation put(byte[] val, boolean delete) {
      if (buffer == null) {
        throw new IllegalStateException("Can not add to mutation after serializing it");
      }
      // fill buffer with column family location
      fill(columnFamily, columnFamilyLength);
      // fill buffer with qualifier location
      fill(columnQualifier, columnQualifierLength);
      // fill buffer with visibility location
      // if none given, fill with EMPTY_BYTES
      if (columnVisibility == null) {
        fill(EMPTY_BYTES, EMPTY_BYTES.length);
      } else {
        fill(columnVisibility, columnVisibilityLength);
      }
      // fill buffer with timestamp location
      // if none given, skip
      fill(hasTs);
      if (hasTs) {
        fill(timestamp);
      }
      // indicate if this is a deletion
      fill(delete);
      // fill buffer with value; small values are copied inline, large values are stored in the
      // values list and referenced from the buffer by a negative 1-based index
      if (val.length < VALUE_SIZE_COPY_CUTOFF) {
        fill(val, val.length);
      } else {
        if (values == null) {
          values = new ArrayList<>();
        }
        byte[] copy = new byte[val.length];
        System.arraycopy(val, 0, copy, 0, val.length);
        values.add(copy);
        fill(-1 * values.size());
      }
      entries++;
      return Mutation.this;
    }
    /**
     * Ends method chain with a put of a byte[] value
     *
     * @param val value
     */
    @Override
    public Mutation put(byte[] val) {
      return put(val, false);
    }
    /**
     * Ends method chain with a put of a ByteBuffer value
     *
     * @param val value
     */
    @Override
    public Mutation put(ByteBuffer val) {
      return put(ByteBufferUtil.toBytes(val), false);
    }
    /**
     * Ends method chain with a put of a CharSequence value
     *
     * @param val value
     */
    @Override
    public Mutation put(CharSequence val) {
      return put(new Text(val.toString()));
    }
    /**
     * Ends method chain with a put of a Text value
     *
     * @param val value
     */
    @Override
    public Mutation put(Text val) {
      return put(val.copyBytes(), false);
    }
    /**
     * Ends method chain with a put of a Value object
     *
     * @param val value
     */
    @Override
    public Mutation put(Value val) {
      return put(val.get(), false);
    }
    /**
     * Ends method chain with a delete
     */
    @Override
    public Mutation delete() {
      return put(EMPTY_BYTES, true);
    }
  }
private byte[] oldReadBytes(UnsynchronizedBuffer.Reader in) {
int len = in.readInt();
if (len == 0) {
return EMPTY_BYTES;
}
byte[] bytes = new byte[len];
in.readBytes(bytes);
return bytes;
}
private byte[] readBytes(UnsynchronizedBuffer.Reader in) {
int len = (int) in.readVLong();
if (len == 0) {
return EMPTY_BYTES;
}
byte[] bytes = new byte[len];
in.readBytes(bytes);
return bytes;
}
/**
* Gets the modifications and deletions in this mutation. After calling this method, further
* modifications to this mutation are ignored. Changes made to the returned updates do not affect
* this mutation.
*
* @return list of modifications and deletions
*/
public List<ColumnUpdate> getUpdates() {
serialize();
UnsynchronizedBuffer.Reader in = new UnsynchronizedBuffer.Reader(data);
if (updates == null) {
if (entries == 1) {
updates = Collections.singletonList(deserializeColumnUpdate(in));
} else {
ColumnUpdate[] tmpUpdates = new ColumnUpdate[entries];
for (int i = 0; i < entries; i++) {
tmpUpdates[i] = deserializeColumnUpdate(in);
}
updates = Arrays.asList(tmpUpdates);
}
}
return updates;
}
  /**
   * Factory hook used during deserialization to create {@link ColumnUpdate} instances; subclasses
   * may override to return a specialized ColumnUpdate.
   */
  protected ColumnUpdate newColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts,
      boolean deleted, byte[] val) {
    return new ColumnUpdate(cf, cq, cv, hasts, ts, deleted, val);
  }
  // Reads one column update from the serialized buffer, in the same field order written by the
  // put() methods: family, qualifier, visibility, hasTimestamp[, timestamp], deleted, value.
  private ColumnUpdate deserializeColumnUpdate(UnsynchronizedBuffer.Reader in) {
    byte[] cf = readBytes(in);
    byte[] cq = readBytes(in);
    byte[] cv = readBytes(in);
    boolean hasts = in.readBoolean();
    long ts = 0;
    if (hasts) {
      ts = in.readVLong();
    }
    boolean deleted = in.readBoolean();
    byte[] val;
    int valLen = (int) in.readVLong();
    if (valLen < 0) {
      // a negative length is a 1-based back-reference into the large-values list
      val = values.get((-1 * valLen) - 1);
    } else if (valLen == 0) {
      val = EMPTY_BYTES;
    } else {
      val = new byte[valLen];
      in.readBytes(val);
    }
    return newColumnUpdate(cf, cq, cv, hasts, ts, deleted, val);
  }
private int cachedValLens = -1;
/**
* Gets the byte length of all large values stored in this mutation.
*
* @return length of all large values
* @see #VALUE_SIZE_COPY_CUTOFF
*/
long getValueLengths() {
if (values == null) {
return 0;
}
if (cachedValLens == -1) {
int tmpCVL = 0;
for (byte[] val : values) {
tmpCVL += val.length;
}
cachedValLens = tmpCVL;
}
return cachedValLens;
}
  /**
   * Gets the total number of bytes in this mutation.
   *
   * @return length of mutation in bytes
   */
  public long numBytes() {
    serialize(); // ensure data reflects all pending updates before measuring
    return row.length + data.length + getValueLengths();
  }
  /**
   * Gets an estimate of the amount of memory used by this mutation. The estimate includes data
   * sizes and object overhead.
   *
   * @return memory usage estimate
   */
  public long estimatedMemoryUsed() {
    // 238 is a rough, fixed estimate of per-Mutation JVM object overhead
    return numBytes() + 238;
  }
  /**
   * Gets the number of modifications / deletions in this mutation.
   *
   * @return the number of modifications / deletions
   */
  public int size() {
    return entries; // maintained incrementally as puts/deletes are added
  }
  /**
   * Deserializes this mutation, auto-detecting the format: the high bit (0x80) of the first byte
   * marks the current (VERSION2) format; otherwise the stream is in the legacy (VERSION1) format.
   */
  @Override
  public void readFields(DataInput in) throws IOException {
    // Clear out cached column updates and value lengths so
    // that we recalculate them based on the (potentially) new
    // data we are about to read in.
    updates = null;
    cachedValLens = -1;
    buffer = null;
    useOldDeserialize = false;
    byte first = in.readByte();
    if ((first & 0x80) != 0x80) {
      // no version marker; 'first' is part of the legacy format's row length
      oldReadFields(first, in);
      useOldDeserialize = true;
      return;
    }
    int len = WritableUtils.readVInt(in);
    row = new byte[len];
    in.readFully(row);
    len = WritableUtils.readVInt(in);
    data = new byte[len];
    in.readFully(data);
    entries = WritableUtils.readVInt(in);
    // flag bit 0x01 indicates the large-values list was serialized
    boolean valuesPresent = (first & 0x01) == 0x01;
    if (valuesPresent) {
      values = new ArrayList<>();
      int numValues = WritableUtils.readVInt(in);
      for (int i = 0; i < numValues; i++) {
        len = WritableUtils.readVInt(in);
        byte[] val = new byte[len];
        in.readFully(val);
        values.add(val);
      }
    } else {
      values = null;
    }
    // flag bit 0x02 marks legacy replication sources, which are read and discarded
    if ((first & 0x02) == 0x02) {
      int numMutations = WritableUtils.readVInt(in);
      for (int i = 0; i < numMutations; i++) {
        // consume the replication sources that may have been previously serialized
        WritableUtils.readString(in);
      }
    }
  }
protected void droppingOldTimestamp(long ts) {}
  /**
   * Reads the legacy (VERSION1) serialization, whose first byte has already been consumed by
   * {@code readFields} to sniff the format version, and converts the entries to the current
   * in-memory format by re-adding them via put().
   */
  private void oldReadFields(byte first, DataInput in) throws IOException {
    byte b = in.readByte();
    byte c = in.readByte();
    byte d = in.readByte();
    // reassemble the 4-byte big-endian row length from 'first' plus the next three bytes
    int len = (((first & 0xff) << 24) | ((b & 0xff) << 16) | ((c & 0xff) << 8) | (d & 0xff));
    row = new byte[len];
    in.readFully(row);
    len = in.readInt();
    byte[] localData = new byte[len];
    in.readFully(localData);
    int localEntries = in.readInt();
    List<byte[]> localValues;
    boolean valuesPresent = in.readBoolean();
    if (valuesPresent) {
      localValues = new ArrayList<>();
      int numValues = in.readInt();
      for (int i = 0; i < numValues; i++) {
        len = in.readInt();
        byte[] val = new byte[len];
        in.readFully(val);
        localValues.add(val);
      }
    } else {
      localValues = null;
    }
    // convert data to new format
    UnsynchronizedBuffer.Reader din = new UnsynchronizedBuffer.Reader(localData);
    buffer = new UnsynchronizedBuffer.Writer();
    for (int i = 0; i < localEntries; i++) {
      byte[] cf = oldReadBytes(din);
      byte[] cq = oldReadBytes(din);
      byte[] cv = oldReadBytes(din);
      boolean hasts = din.readBoolean();
      long ts = din.readLong();
      boolean deleted = din.readBoolean();
      byte[] val;
      int valLen = din.readInt();
      if (valLen < 0) {
        // a negative length is a 1-based back-reference into the large-values list
        val = localValues.get((-1 * valLen) - 1);
      } else if (valLen == 0) {
        val = EMPTY_BYTES;
      } else {
        val = new byte[valLen];
        din.readBytes(val);
      }
      put(cf, cq, cv, hasts, ts, deleted, val);
      if (!hasts) {
        // the old format always wrote a timestamp; notify subclasses when one is discarded
        droppingOldTimestamp(ts);
      }
    }
    serialize();
  }
  /**
   * Serializes this mutation in the current (VERSION2) format: a flags byte (0x80 marks the
   * format, 0x01 marks large values present), then vint-prefixed row, data, entry count, and the
   * optional large-values list.
   */
  @Override
  public void write(DataOutput out) throws IOException {
    final byte[] integerBuffer = new byte[5];
    serialize();
    byte hasValues = (values == null) ? 0 : (byte) 1;
    // When replication sources were supported, we used the 2nd least-significant bit to denote
    // their presence, but this is no longer used; kept here for historical explanation only
    // hasValues = (byte) (0x02 | hasValues);
    out.write((byte) (0x80 | hasValues));
    UnsynchronizedBuffer.writeVInt(out, integerBuffer, row.length);
    out.write(row);
    UnsynchronizedBuffer.writeVInt(out, integerBuffer, data.length);
    out.write(data);
    UnsynchronizedBuffer.writeVInt(out, integerBuffer, entries);
    if ((0x01 & hasValues) == 0x01) {
      UnsynchronizedBuffer.writeVInt(out, integerBuffer, values.size());
      for (byte[] val : values) {
        UnsynchronizedBuffer.writeVInt(out, integerBuffer, val.length);
        out.write(val);
      }
    }
  }
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
}
if (o != null && o.getClass().equals(this.getClass())) {
return equalMutation((Mutation) o);
}
return false;
}
  @Override
  public int hashCode() {
    // derived from the serialized form so it is consistent with equalMutation()
    return serializedSnapshot().hashCode();
  }
  /**
   * Checks if this mutation equals another. Two mutations are equal if they target the same row and
   * have the same modifications and deletions, in order. This method may be removed in a future API
   * revision in favor of {@link #equals(Object)}. See ACCUMULO-1627 for more information.
   *
   * @param m mutation to compare
   * @return true if this mutation equals the other, false otherwise
   */
  public boolean equals(Mutation m) {
    return this.equals((Object) m); // delegate so both overloads stay consistent
  }
  // Full equality check: rows, entry counts, and serialized data must match, and since the
  // serialized data only references large values by index, the large-values lists must also be
  // compared element by element.
  private boolean equalMutation(Mutation m) {
    ByteBuffer myData = serializedSnapshot();
    ByteBuffer otherData = m.serializedSnapshot();
    if (Arrays.equals(row, m.row) && entries == m.entries && myData.equals(otherData)) {
      // both mutations must have the same large-values list (or both none) to be equal
      if (values == null && m.values == null) {
        return true;
      }
      if (values != null && m.values != null && values.size() == m.values.size()) {
        for (int i = 0; i < values.size(); i++) {
          if (!Arrays.equals(values.get(i), m.values.get(i))) {
            return false;
          }
        }
        return true;
      }
    }
    return false;
  }
  /**
   * Creates a {@link org.apache.accumulo.core.dataImpl.thrift.TMutation} object containing this
   * Mutation's data.
   *
   * Note that this method will move the Mutation into a "serialized" state that will prevent users
   * from adding more data via Mutation#put().
   *
   * @return a thrift form of this Mutation
   */
  public TMutation toThrift() {
    return toThrift(true); // serialize first so the snapshot reflects all pending updates
  }
private TMutation toThrift(boolean serialize) {
if (serialize) {
this.serialize();
}
ByteBuffer data = serializedSnapshot();
return new TMutation(ByteBuffer.wrap(row), data, ByteBufferUtil.toByteBuffers(values), entries);
}
/**
* Gets the serialization format used to (de)serialize this mutation.
*
* @return serialization format
*/
protected SERIALIZED_FORMAT getSerializedFormat() {
return this.useOldDeserialize ? SERIALIZED_FORMAT.VERSION1 : SERIALIZED_FORMAT.VERSION2;
}
/**
* Creates a multi-lined, human-readable String for this mutation.
*
* This method creates many intermediate Strings and should not be used for large volumes of
* Mutations.
*
* @return A multi-lined, human-readable String for this mutation.
*
* @since 2.1.0
*/
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append("mutation: ").append(new String(row, UTF_8)).append('\n');
for (ColumnUpdate update : getUpdates()) {
sb.append(" update: ");
sb.append(new String(update.getColumnFamily(), UTF_8));
sb.append(':');
sb.append(new String(update.getColumnQualifier(), UTF_8));
sb.append(" value ");
if (update.isDeleted()) {
sb.append("[delete]");
} else {
sb.append(new String(update.getValue(), UTF_8));
}
sb.append('\n');
}
return sb.toString();
}
}
| 9,922 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/ConditionalMutation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static com.google.common.base.Preconditions.checkArgument;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.io.Text;
/**
* A Mutation that contains a list of conditions that must all be met before the mutation is
* applied.
*
* @since 1.6.0
*/
public class ConditionalMutation extends Mutation {

  private List<Condition> conditions = new ArrayList<>();

  public ConditionalMutation(byte[] row, Condition... conditions) {
    super(row);
    init(conditions);
  }

  public ConditionalMutation(byte[] row, int start, int length, Condition... conditions) {
    super(row, start, length);
    init(conditions);
  }

  public ConditionalMutation(Text row, Condition... conditions) {
    super(row);
    init(conditions);
  }

  public ConditionalMutation(CharSequence row, Condition... conditions) {
    super(row);
    init(conditions);
  }

  public ConditionalMutation(ByteSequence row, Condition... conditions) {
    // TODO add ByteSequence methods to mutations
    super(row.toArray());
    init(conditions);
  }

  /**
   * Copy constructor; copies both the mutation data and the condition list.
   */
  public ConditionalMutation(ConditionalMutation cm) {
    super(cm);
    this.conditions = new ArrayList<>(cm.conditions);
  }

  // shared constructor logic: validate and append the initial conditions
  private void init(Condition... conditions) {
    checkArgument(conditions != null, "conditions is null");
    this.conditions.addAll(Arrays.asList(conditions));
  }

  /**
   * Adds a condition that must be met for this mutation to be applied.
   *
   * @param condition condition to add, must be non-null
   */
  public void addCondition(Condition condition) {
    checkArgument(condition != null, "condition is null");
    this.conditions.add(condition);
  }

  /**
   * @return an unmodifiable view of this mutation's conditions, in the order they were added
   */
  public List<Condition> getConditions() {
    return Collections.unmodifiableList(conditions);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    // instanceof is false for null, so the previous explicit null check was redundant
    if (!(o instanceof ConditionalMutation)) {
      return false;
    }
    ConditionalMutation cm = (ConditionalMutation) o;
    return conditions.equals(cm.conditions) && super.equals(o);
  }

  @Override
  public int hashCode() {
    int result = super.hashCode();
    result = 37 * result + conditions.hashCode();
    return result;
  }
}
| 9,923 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/PartialKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
/**
 * Specifications for part of a {@link Key}. Each constant names the key fields, counted from the
 * row outward, that participate in a comparison or match.
 */
public enum PartialKey {
  ROW(1),
  ROW_COLFAM(2),
  ROW_COLFAM_COLQUAL(3),
  ROW_COLFAM_COLQUAL_COLVIS(4),
  ROW_COLFAM_COLQUAL_COLVIS_TIME(5),
  // everything with delete flag
  ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL(6);

  // number of key fields included; final because it never changes after construction
  final int depth;

  // enum constructors are implicitly private, so the modifier is omitted
  PartialKey(int depth) {
    this.depth = depth;
  }

  /**
   * Gets the depth of this partial key.
   *
   * @return depth
   */
  public int getDepth() {
    return depth;
  }
}
| 9,924 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/ByteSequence.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.io.Serializable;
import org.apache.hadoop.io.WritableComparator;
/**
 * A sequence of bytes.
 */
public abstract class ByteSequence implements Comparable<ByteSequence>, Serializable {

  private static final long serialVersionUID = 1L;

  /**
   * Gets a byte within this sequence.
   *
   * @param i index into sequence
   * @return byte
   * @throws IllegalArgumentException if i is out of range
   */
  public abstract byte byteAt(int i);

  /**
   * Gets the length of this sequence.
   *
   * @return sequence length
   */
  public abstract int length();

  /**
   * Returns a portion of this sequence.
   *
   * @param start index of subsequence start (inclusive)
   * @param end index of subsequence end (exclusive)
   */
  public abstract ByteSequence subSequence(int start, int end);

  /**
   * Returns a byte array containing the bytes in this sequence. This method may copy the sequence
   * data or may return a backing byte array directly.
   *
   * @return byte array
   */
  public abstract byte[] toArray();

  /**
   * Determines whether this sequence is backed by a byte array.
   *
   * @return true if sequence is backed by a byte array
   */
  public abstract boolean isBackedByArray();

  /**
   * Gets the backing byte array for this sequence.
   *
   * @return byte array
   */
  public abstract byte[] getBackingArray();

  /**
   * Gets the offset for this sequence. This value represents the starting point for the sequence
   * in the backing array, if there is one.
   *
   * @return offset (inclusive)
   */
  public abstract int offset();

  /**
   * Compares two byte sequences lexicographically, unsigned byte by unsigned byte. The result is
   * negative, zero, or positive when the first sequence is less than, equal to, or greater than
   * the second. A sequence that is a proper prefix of the other is considered less than it.
   *
   * @param bs1 first byte sequence to compare
   * @param bs2 second byte sequence to compare
   * @return comparison result
   */
  public static int compareBytes(ByteSequence bs1, ByteSequence bs2) {
    int limit = Math.min(bs1.length(), bs2.length());
    for (int idx = 0; idx < limit; idx++) {
      // compare as unsigned values
      int diff = (bs1.byteAt(idx) & 0xff) - (bs2.byteAt(idx) & 0xff);
      if (diff != 0) {
        return diff;
      }
    }
    return bs1.length() - bs2.length();
  }

  @Override
  public int compareTo(ByteSequence obs) {
    // fall back to byte-at-a-time comparison unless both sides expose backing arrays
    if (!isBackedByArray() || !obs.isBackedByArray()) {
      return compareBytes(this, obs);
    }
    return WritableComparator.compareBytes(getBackingArray(), offset(), length(),
        obs.getBackingArray(), obs.offset(), obs.length());
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof ByteSequence)) {
      return false;
    }
    if (this == o) {
      return true;
    }
    ByteSequence other = (ByteSequence) o;
    // cheap length check first avoids a byte-wise comparison in the common unequal case
    return other.length() == length() && compareTo(other) == 0;
  }

  @Override
  public int hashCode() {
    int result = 1;
    if (isBackedByArray()) {
      byte[] bytes = getBackingArray();
      int stop = offset() + length();
      for (int i = offset(); i < stop; i++) {
        result = result * 31 + bytes[i];
      }
    } else {
      for (int i = 0; i < length(); i++) {
        result = result * 31 + byteAt(i);
      }
    }
    return result;
  }
}
| 9,925 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/LoadPlan.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.accumulo.core.client.admin.TableOperations.ImportMappingOptions;
import org.apache.hadoop.io.Text;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.primitives.UnsignedBytes;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * Information about where to load files into an Accumulo table.
 *
 * @see ImportMappingOptions#plan(LoadPlan)
 * @since 2.0.0
 */
public class LoadPlan {
  private final List<Destination> destinations;

  // defensive copies keep Destination immutable even if callers later mutate their arrays
  private static byte[] copy(byte[] data) {
    return data == null ? null : Arrays.copyOf(data, data.length);
  }

  private static byte[] copy(Text data) {
    return data == null ? null : data.copyBytes();
  }

  private static byte[] copy(CharSequence data) {
    return data == null ? null : data.toString().getBytes(UTF_8);
  }

  // rejects paths; only a bare file name is allowed since loads happen from one directory
  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "this code is validating the input")
  private static String checkFileName(String fileName) {
    Preconditions.checkArgument(Paths.get(fileName).getNameCount() == 1,
        "Expected only filename, but got %s", fileName);
    return fileName;
  }

  /**
   * @since 2.0.0
   */
  public enum RangeType {
    /**
     * Range that corresponds to one or more tablets in a table. For a range of this type, the
     * start row and end row can be null. The start row is exclusive and the end row is inclusive
     * (like Accumulo tablets). A common use case for this would be when files were partitioned
     * using a table's splits. When using this range type, the start and end row must exist as
     * splits in the table or an exception will be thrown at load time.
     */
    TABLE,
    /**
     * Range that correspond to known rows in a file. For this range type, the start row and end
     * row must be non-null. The start row and end row are both considered inclusive. At load time,
     * these data ranges will be mapped to table ranges.
     */
    FILE
  }

  /**
   * Mapping of a file to a row range with an associated range type.
   *
   * @since 2.0.0
   */
  public static class Destination {

    private final String fileName;
    private final byte[] startRow;
    private final byte[] endRow;
    private final RangeType rangeType;

    // FILE ranges require both endpoints; TABLE ranges allow nulls
    private byte[] checkRow(RangeType type, byte[] row) {
      if (type == RangeType.FILE && row == null) {
        throw new IllegalArgumentException(
            "Row can not be null when range type is " + RangeType.FILE);
      }
      return row;
    }

    private Destination(String fileName, RangeType rangeType, byte[] startRow, byte[] endRow) {
      this.fileName = checkFileName(fileName);
      this.rangeType = rangeType;
      this.startRow = checkRow(rangeType, startRow);
      this.endRow = checkRow(rangeType, endRow);

      if (rangeType == RangeType.FILE) {
        // FILE endpoints are both inclusive, so equal rows are allowed
        if (UnsignedBytes.lexicographicalComparator().compare(startRow, endRow) > 0) {
          String srs = new String(startRow, UTF_8);
          String ers = new String(endRow, UTF_8);
          throw new IllegalArgumentException(
              "Start row is greater than end row : " + srs + " " + ers);
        }
      } else if (rangeType == RangeType.TABLE) {
        // TABLE start row is exclusive, so equal rows would denote an empty range
        if (startRow != null && endRow != null
            && UnsignedBytes.lexicographicalComparator().compare(startRow, endRow) >= 0) {
          String srs = new String(startRow, UTF_8);
          String ers = new String(endRow, UTF_8);
          throw new IllegalArgumentException(
              "Start row is greater than or equal to end row : " + srs + " " + ers);
        }
      } else {
        // previously threw a bare IllegalStateException, which gave no hint about the cause
        throw new IllegalStateException("Unknown range type: " + rangeType);
      }
    }

    public String getFileName() {
      return fileName;
    }

    /** @return a copy of the start row, or null for an unbounded TABLE range */
    public byte[] getStartRow() {
      return copy(startRow);
    }

    /** @return a copy of the end row, or null for an unbounded TABLE range */
    public byte[] getEndRow() {
      return copy(endRow);
    }

    public RangeType getRangeType() {
      return rangeType;
    }
  }

  private LoadPlan(List<Destination> destinations) {
    this.destinations = destinations;
  }

  /** @return the immutable list of file-to-range mappings in this plan */
  public Collection<Destination> getDestinations() {
    return destinations;
  }

  /**
   * @since 2.0.0
   */
  public interface Builder {
    /**
     * Specify the row range where a file should be loaded. Note that whether the startRow
     * parameter is inclusive or exclusive is determined by the {@link RangeType} parameter.
     *
     * @param fileName this should not be a path. Only a file name because loads are expected to
     *        happen from a single directory.
     */
    Builder loadFileTo(String fileName, RangeType rangeType, Text startRow, Text endRow);

    /**
     * Specify the row range where a file should be loaded. Note that whether the startRow
     * parameter is inclusive or exclusive is determined by the {@link RangeType} parameter.
     *
     * @param fileName this should not be a path. Only a file name because loads are expected to
     *        happen from a single directory.
     */
    Builder loadFileTo(String fileName, RangeType rangeType, byte[] startRow, byte[] endRow);

    /**
     * Specify the row range where a file should be loaded. Note that whether the startRow
     * parameter is inclusive or exclusive is determined by the {@link RangeType} parameter.
     *
     * @param fileName this should not be a path. Only a file name because loads are expected to
     *        happen from a single directory.
     */
    Builder loadFileTo(String fileName, RangeType rangeType, CharSequence startRow,
        CharSequence endRow);

    Builder addPlan(LoadPlan plan);

    LoadPlan build();
  }

  public static Builder builder() {
    return new Builder() {
      final ImmutableList.Builder<Destination> fmb = ImmutableList.builder();

      @Override
      public Builder loadFileTo(String fileName, RangeType rangeType, Text startRow, Text endRow) {
        fmb.add(new Destination(fileName, rangeType, copy(startRow), copy(endRow)));
        return this;
      }

      @Override
      public Builder loadFileTo(String fileName, RangeType rangeType, byte[] startRow,
          byte[] endRow) {
        fmb.add(new Destination(fileName, rangeType, copy(startRow), copy(endRow)));
        return this;
      }

      @Override
      public Builder loadFileTo(String fileName, RangeType rangeType, CharSequence startRow,
          CharSequence endRow) {
        fmb.add(new Destination(fileName, rangeType, copy(startRow), copy(endRow)));
        return this;
      }

      @Override
      public Builder addPlan(LoadPlan plan) {
        fmb.addAll(plan.getDestinations());
        return this;
      }

      @Override
      public LoadPlan build() {
        return new LoadPlan(fmb.build());
      }
    };
  }
}
| 9,926 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/InstanceId.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.util.Objects;
import java.util.UUID;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
/**
 * A strongly typed representation of an Accumulo instance ID. The constructor for this class will
 * throw an error if the canonical parameter is null.
 *
 * @since 2.1.0
 */
public class InstanceId extends AbstractId<InstanceId> {
  private static final long serialVersionUID = 1L;

  // Interns InstanceId objects so at most one instance per canonical string is live at a time,
  // limiting the number of InstanceId objects in the JVM; weak values let unused entries be
  // garbage collected as soon as nothing else references them.
  static final Cache<String,InstanceId> cache = Caffeine.newBuilder().weakValues().build();

  private InstanceId(String canonical) {
    super(canonical);
  }

  /**
   * Get a InstanceId object for the provided canonical string. This is guaranteed to be non-null
   *
   * @param canonical Instance ID string
   * @return InstanceId object
   */
  public static InstanceId of(final String canonical) {
    return cache.get(canonical, InstanceId::new);
  }

  /**
   * Get a InstanceId object for the provided uuid. This is guaranteed to be non-null
   *
   * @param uuid UUID object
   * @return InstanceId object
   */
  public static InstanceId of(final UUID uuid) {
    Objects.requireNonNull(uuid, "uuid cannot be null");
    return of(uuid.toString());
  }
}
| 9,927 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/ColumnUpdate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.util.Arrays;
/**
* A single column and value pair within a {@link Mutation}.
*/
public class ColumnUpdate {
private byte[] columnFamily;
private byte[] columnQualifier;
private byte[] columnVisibility;
private long timestamp;
private boolean hasTimestamp;
private byte[] val;
private boolean deleted;
/**
* Creates a new column update.
*
* @param cf column family
* @param cq column qualifier
* @param cv column visibility
* @param hasts true if the update specifies a timestamp
* @param ts timestamp
* @param deleted delete marker
* @param val cell value
*/
public ColumnUpdate(byte[] cf, byte[] cq, byte[] cv, boolean hasts, long ts, boolean deleted,
byte[] val) {
this.columnFamily = cf;
this.columnQualifier = cq;
this.columnVisibility = cv;
this.hasTimestamp = hasts;
this.timestamp = ts;
this.deleted = deleted;
this.val = val;
}
/**
* Gets whether this update specifies a timestamp.
*
* @return true if this update specifies a timestamp
*/
public boolean hasTimestamp() {
return hasTimestamp;
}
/**
* Gets the column family for this update. Not a defensive copy.
*
* @return column family
*/
public byte[] getColumnFamily() {
return columnFamily;
}
/**
* Gets the column qualifier for this update. Not a defensive copy.
*
* @return column qualifier
*/
public byte[] getColumnQualifier() {
return columnQualifier;
}
/**
* Gets the column visibility for this update.
*
* @return column visibility
*/
public byte[] getColumnVisibility() {
return columnVisibility;
}
/**
* Gets the timestamp for this update.
*
* @return timestamp
*/
public long getTimestamp() {
return this.timestamp;
}
/**
* Gets the delete marker for this update.
*
* @return delete marker
*/
public boolean isDeleted() {
return this.deleted;
}
/**
* Gets the cell value for this update.
*
* @return cell value
*/
public byte[] getValue() {
return this.val;
}
@Override
public String toString() {
return Arrays.toString(columnFamily) + ":" + Arrays.toString(columnQualifier) + " ["
+ Arrays.toString(columnVisibility) + "] " + (hasTimestamp ? timestamp : "NO_TIME_STAMP")
+ " " + Arrays.toString(val) + " " + deleted;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof ColumnUpdate)) {
return false;
}
ColumnUpdate upd = (ColumnUpdate) obj;
return Arrays.equals(getColumnFamily(), upd.getColumnFamily())
&& Arrays.equals(getColumnQualifier(), upd.getColumnQualifier())
&& Arrays.equals(getColumnVisibility(), upd.getColumnVisibility())
&& isDeleted() == upd.isDeleted() && Arrays.equals(getValue(), upd.getValue())
&& hasTimestamp() == upd.hasTimestamp() && getTimestamp() == upd.getTimestamp();
}
@Override
public int hashCode() {
return Arrays.hashCode(columnFamily) + Arrays.hashCode(columnQualifier)
+ Arrays.hashCode(columnVisibility)
+ (hasTimestamp ? (Boolean.TRUE.hashCode() + Long.valueOf(timestamp).hashCode())
: Boolean.FALSE.hashCode())
+ (deleted ? Boolean.TRUE.hashCode() : (Boolean.FALSE.hashCode() + Arrays.hashCode(val)));
}
}
| 9,928 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/NamespaceId.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
/**
* A strongly typed representation of a namespace ID. This class cannot be used to get a namespace
* ID from a namespace name, but does provide the namespace ID string wrapped with a stronger type.
* The constructor for this class will throw an error if the canonical parameter is null.
*
* @since 2.0.0
*/
public class NamespaceId extends AbstractId<NamespaceId> {
  private static final long serialVersionUID = 1L;

  // Interning cache: at most one NamespaceId object exists per canonical string at a time.
  // Weak values allow instances to be garbage-collected once no caller references them.
  static final Cache<String,NamespaceId> cache = Caffeine.newBuilder().weakValues().build();

  private NamespaceId(String canonical) {
    super(canonical);
  }

  /**
   * Get a NamespaceId object for the provided canonical string. This is guaranteed to be non-null.
   *
   * @param canonical Namespace ID string
   * @return NamespaceId object
   */
  public static NamespaceId of(final String canonical) {
    return cache.get(canonical, NamespaceId::new);
  }
}
| 9,929 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/AbstractId.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.io.Serializable;
import java.util.Objects;
/**
* An abstract identifier class for comparing equality of identifiers of the same type.
*
* @since 2.0.0
*/
public abstract class AbstractId<T extends AbstractId<T>> implements Comparable<T>, Serializable {
private static final long serialVersionUID = 1L;
private final String canonical;
protected AbstractId(final String canonical) {
this.canonical = Objects.requireNonNull(canonical, "canonical cannot be null");
}
/**
* The canonical ID. This is guaranteed to be non-null.
*/
public final String canonical() {
return canonical;
}
/**
* AbstractID objects are considered equal if, and only if, they are of the same type and have the
* same canonical identifier.
*/
@Override
public boolean equals(final Object obj) {
return this == obj || (obj != null && Objects.equals(getClass(), obj.getClass())
&& Objects.equals(canonical(), ((AbstractId<?>) obj).canonical()));
}
@Override
public int hashCode() {
return canonical().hashCode();
}
/**
* Returns a string of the canonical ID. This is guaranteed to be non-null.
*/
@Override
public final String toString() {
return canonical();
}
@Override
public int compareTo(T other) {
return canonical().compareTo(Objects.requireNonNull(other, "other cannot be null").canonical());
}
}
| 9,930 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/Column.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Comparator;
import org.apache.accumulo.core.dataImpl.thrift.TColumn;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
/**
* A column, specified by family, qualifier, and visibility.
*/
/**
 * A column, specified by family, qualifier, and visibility.
 */
public class Column implements WritableComparable<Column> {

  // Lexicographic byte-array order, with a null array sorting before any non-null array.
  private static final Comparator<byte[]> BYTE_COMPARATOR = Comparator
      .nullsFirst((a, b) -> WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length));

  // Total order over columns: family first, then qualifier, then visibility.
  private static final Comparator<Column> COMPARATOR =
      Comparator.comparing(Column::getColumnFamily, BYTE_COMPARATOR)
          .thenComparing(Column::getColumnQualifier, BYTE_COMPARATOR)
          .thenComparing(Column::getColumnVisibility, BYTE_COMPARATOR);

  /**
   * Compares this column to another. Column families are compared first, then qualifiers, then
   * visibilities.
   *
   * @param that column to compare
   * @return comparison result
   */
  @Override
  public int compareTo(Column that) {
    return COMPARATOR.compare(this, that);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    // Wire format per component: a boolean presence flag, then (if present) an int length
    // followed by that many bytes. Must stay symmetric with write(DataOutput).
    if (in.readBoolean()) {
      int len = in.readInt();
      columnFamily = new byte[len];
      in.readFully(columnFamily);
    } else {
      columnFamily = null;
    }
    if (in.readBoolean()) {
      int len = in.readInt();
      columnQualifier = new byte[len];
      in.readFully(columnQualifier);
    } else {
      columnQualifier = null;
    }
    if (in.readBoolean()) {
      int len = in.readInt();
      columnVisibility = new byte[len];
      in.readFully(columnVisibility);
    } else {
      columnVisibility = null;
    }
  }

  @Override
  public void write(DataOutput out) throws IOException {
    // Mirrors readFields: presence flag, then length-prefixed bytes when present.
    if (columnFamily == null) {
      out.writeBoolean(false);
    } else {
      out.writeBoolean(true);
      out.writeInt(columnFamily.length);
      out.write(columnFamily);
    }
    if (columnQualifier == null) {
      out.writeBoolean(false);
    } else {
      out.writeBoolean(true);
      out.writeInt(columnQualifier.length);
      out.write(columnQualifier);
    }
    if (columnVisibility == null) {
      out.writeBoolean(false);
    } else {
      out.writeBoolean(true);
      out.writeInt(columnVisibility.length);
      out.write(columnVisibility);
    }
  }

  // Public mutable fields retained for backward compatibility; each may be null.
  public byte[] columnFamily;
  public byte[] columnQualifier;
  public byte[] columnVisibility;

  /**
   * Creates a new blank column.
   */
  public Column() {}

  /**
   * Creates a new column. The given arrays are used directly (not copied).
   *
   * @param columnFamily family
   * @param columnQualifier qualifier
   * @param columnVisibility visibility
   */
  public Column(byte[] columnFamily, byte[] columnQualifier, byte[] columnVisibility) {
    this();
    this.columnFamily = columnFamily;
    this.columnQualifier = columnQualifier;
    this.columnVisibility = columnVisibility;
  }

  /**
   * Creates a new column.
   *
   * @param tcol Thrift column
   */
  public Column(TColumn tcol) {
    this(toBytes(tcol.columnFamily), toBytes(tcol.columnQualifier), toBytes(tcol.columnVisibility));
  }

  @Override
  public boolean equals(Object that) {
    if (that == null) {
      return false;
    }
    if (that instanceof Column) {
      return this.equals((Column) that);
    }
    return false;
  }

  /**
   * Checks if this column equals another.
   *
   * @param that column to compare; may be null
   * @return true if this column equals that, false otherwise
   */
  public boolean equals(Column that) {
    // Null guard: previously a null argument propagated a NullPointerException out of the
    // comparator instead of returning false as the equals contract requires.
    return that != null && this.compareTo(that) == 0;
  }

  // Hashes a possibly-null byte array; null hashes to 0 so it is stable and distinct-friendly.
  private static int hash(byte[] b) {
    if (b == null) {
      return 0;
    }
    return WritableComparator.hashBytes(b, b.length);
  }

  @Override
  public int hashCode() {
    return hash(columnFamily) + hash(columnQualifier) + hash(columnVisibility);
  }

  /**
   * Gets the column family. Not a defensive copy.
   *
   * @return family
   */
  public byte[] getColumnFamily() {
    return columnFamily;
  }

  /**
   * Gets the column qualifier. Not a defensive copy.
   *
   * @return qualifier
   */
  public byte[] getColumnQualifier() {
    return columnQualifier;
  }

  /**
   * Gets the column visibility. Not a defensive copy.
   *
   * @return visibility
   */
  public byte[] getColumnVisibility() {
    return columnVisibility;
  }

  /**
   * Gets a string representation of this column. The family, qualifier, and visibility are
   * interpreted as strings using the UTF-8 encoding; nulls are interpreted as empty strings.
   *
   * @return string form of column
   */
  @Override
  public String toString() {
    return new String(columnFamily == null ? new byte[0] : columnFamily, UTF_8) + ":"
        + new String(columnQualifier == null ? new byte[0] : columnQualifier, UTF_8) + ":"
        + new String(columnVisibility == null ? new byte[0] : columnVisibility, UTF_8);
  }

  /**
   * Converts this column to Thrift. Null components stay null in the Thrift object.
   *
   * @return Thrift column
   */
  public TColumn toThrift() {
    return new TColumn(columnFamily == null ? null : ByteBuffer.wrap(columnFamily),
        columnQualifier == null ? null : ByteBuffer.wrap(columnQualifier),
        columnVisibility == null ? null : ByteBuffer.wrap(columnVisibility));
  }
}
| 9,931 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.accumulo.core.util.ByteBufferUtil;
/**
* An implementation of {@link ByteSequence} that uses a backing byte array.
*/
/**
 * An implementation of {@link ByteSequence} that uses a backing byte array.
 */
public class ArrayByteSequence extends ByteSequence implements Serializable {

  private static final long serialVersionUID = 1L;

  protected byte[] data;
  protected int offset;
  protected int length;

  /**
   * Creates a new sequence. The given byte array is used directly as the backing array, so later
   * changes made to the array reflect into the new sequence.
   *
   * @param data byte data
   */
  public ArrayByteSequence(byte[] data) {
    this.data = data;
    this.offset = 0;
    this.length = data.length;
  }

  /**
   * Creates a new sequence from a subsequence of the given byte array. The given byte array is used
   * directly as the backing array, so later changes made to the (relevant portion of the) array
   * reflect into the new sequence.
   *
   * @param data byte data
   * @param offset starting offset in byte array (inclusive)
   * @param length number of bytes to include in sequence
   * @throws IllegalArgumentException if the offset or length are out of bounds for the given byte
   *         array
   */
  public ArrayByteSequence(byte[] data, int offset, int length) {
    boolean badBounds =
        offset < 0 || offset > data.length || length < 0 || (offset + length) > data.length;
    if (badBounds) {
      throw new IllegalArgumentException(" Bad offset and/or length data.length = " + data.length
          + " offset = " + offset + " length = " + length);
    }
    this.data = data;
    this.offset = offset;
    this.length = length;
  }

  /**
   * Creates a new sequence from the given string, encoded as UTF-8 bytes.
   *
   * @param s string to represent as bytes
   */
  public ArrayByteSequence(String s) {
    this(s.getBytes(UTF_8));
  }

  /**
   * Creates a new sequence based on a byte buffer. If the byte buffer has an array, that array (and
   * the buffer's offset and limit) are used; otherwise, a new backing array is created and a
   * relative bulk get is performed to transfer the buffer's contents (starting at its current
   * position and not beyond its limit).
   *
   * @param buffer byte buffer
   */
  public ArrayByteSequence(ByteBuffer buffer) {
    if (buffer.hasArray()) {
      // Wrap the buffer's backing array directly; no bytes are copied.
      this.data = buffer.array();
      this.offset = buffer.arrayOffset() + buffer.position();
      this.length = buffer.remaining();
    } else {
      // Direct (or read-only) buffer: copy its remaining content into a fresh array.
      this.data = ByteBufferUtil.toBytes(buffer);
      this.offset = 0;
      this.length = this.data.length;
    }
  }

  // Materializes the content of an arbitrary ByteSequence as a standalone byte array.
  private static byte[] asArray(ByteSequence bs) {
    if (!bs.isBackedByArray()) {
      return bs.toArray();
    }
    int begin = bs.offset();
    return Arrays.copyOfRange(bs.getBackingArray(), begin, begin + bs.length());
  }

  /**
   * Copy constructor. Copies contents of byteSequence.
   *
   * @since 2.0.0
   */
  public ArrayByteSequence(ByteSequence byteSequence) {
    this(asArray(byteSequence));
  }

  @Override
  public byte byteAt(int i) {
    if (i < 0) {
      throw new IllegalArgumentException("i < 0, " + i);
    }
    if (i >= length) {
      throw new IllegalArgumentException("i >= length, " + i + " >= " + length);
    }
    return data[offset + i];
  }

  @Override
  public byte[] getBackingArray() {
    return data;
  }

  @Override
  public boolean isBackedByArray() {
    return true;
  }

  @Override
  public int length() {
    return length;
  }

  @Override
  public int offset() {
    return offset;
  }

  @Override
  public ByteSequence subSequence(int start, int end) {
    if (start < 0 || start > end || end > length) {
      throw new IllegalArgumentException("Bad start and/end start = " + start + " end=" + end
          + " offset=" + offset + " length=" + length);
    }
    return new ArrayByteSequence(data, offset + start, end - start);
  }

  @Override
  public byte[] toArray() {
    if (offset == 0 && length == data.length) {
      // Full-span sequences hand back the backing array itself, without copying.
      return data;
    }
    return Arrays.copyOfRange(data, offset, offset + length);
  }

  @Override
  public String toString() {
    return new String(data, offset, length, UTF_8);
  }
}
| 9,932 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/KeyBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;
/**
* A builder used to build {@link Key}s by defining their components.
*
* The rules are:
* <ul>
* <li>All components of the {@link Key} are optional except the row</li>
* <li>Components not explicitly set default to empty byte array except the timestamp which defaults
* to <code>Long.MAX_VALUE</code></li>
* <li>The column qualifier can only be set if the column family has been set first</li>
* <li>The column visibility can only be set if at least the column family has been set first</li>
* </ul>
*
* The builder supports three types of components: <code>byte[]</code>, <code>Text</code> and
* <code>CharSequence</code>. <code>CharSequence</code>s must be UTF-8 encoded.
*
* The builder is mutable and not thread safe.
*
* @see org.apache.accumulo.core.data.Key
* @since 2.0
*/
public class KeyBuilder {
  /**
   * Base Builder interface which can be used to set the {@link Key} timestamp and delete marker and
   * to build the {@link Key}.
   *
   * @since 2.0
   */
  public interface Build {
    /**
     * Build a {@link Key} from this builder.
     *
     * @return the {@link Key} built from this builder
     */
    Key build();
    /**
     * Change the timestamp of the {@link Key} created.
     *
     * @param timestamp the timestamp to use for the {@link Key}
     * @return this builder
     */
    Build timestamp(long timestamp);
    /**
     * Set the deleted marker of the {@link Key} to the parameter.
     *
     * @param deleted if the {@link Key} should be marked as deleted or not
     * @return this builder
     */
    Build deleted(boolean deleted);
  }
  /**
   * Builder step used to set the row part of the {@link Key}.
   *
   * @since 2.0
   */
  public interface RowStep extends Build {
    /**
     * Set the row of the {@link Key} that this builder will build to the parameter.
     *
     * @param row the row to use for the key
     * @return this builder
     */
    ColumnFamilyStep row(final Text row);
    /**
     * Set the row of the {@link Key} that this builder will build to the parameter.
     *
     * @param row the row to use for the key
     * @return this builder
     */
    ColumnFamilyStep row(final byte[] row);
    /**
     * Set the row of the {@link Key} that this builder will build to the parameter.
     *
     * @param row the row to use for the key
     * @param offset the offset within the array of the first byte to be read; must be non-negative
     *        and no larger than row.length
     * @param length the number of bytes to be read from the given array; must be non-negative and
     *        no larger than row.length - offset
     * @return this builder
     */
    ColumnFamilyStep row(final byte[] row, int offset, int length);
    /**
     * Set the row of the {@link Key} that this builder will build to the parameter.
     *
     * @param row the row to use for the key. The encoding must be UTF-8
     * @return this builder
     */
    ColumnFamilyStep row(final CharSequence row);
  }
  /**
   * Builder step used to set the columnFamily part of the {@link Key}.
   *
   * @since 2.0
   */
  public interface ColumnFamilyStep extends ColumnVisibilityStep {
    /**
     * Set the column family of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnFamily the column family to use for the {@link Key}
     * @return this builder
     */
    ColumnQualifierStep family(final byte[] columnFamily);
    /**
     * Set the column family of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnFamily the column family to use for the {@link Key}
     * @param offset the offset within the array of the first byte to be read; must be non-negative
     *        and no larger than row.length
     * @param length the number of bytes to be read from the given array; must be non-negative and
     *        no larger than row.length - offset
     * @return this builder
     */
    ColumnQualifierStep family(final byte[] columnFamily, int offset, int length);
    /**
     * Set the column family of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnFamily the column family to use for the {@link Key}
     * @return this builder
     */
    ColumnQualifierStep family(final Text columnFamily);
    /**
     * Set the column family of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnFamily the column family to use for the {@link Key}. The encoding must be UTF-8
     * @return this builder
     */
    ColumnQualifierStep family(final CharSequence columnFamily);
  }
  /**
   * Builder step used to set the column qualifier part of the {@link Key}.
   *
   * @since 2.0
   */
  public interface ColumnQualifierStep extends ColumnVisibilityStep {
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnQualifier the column qualifier to use for the {@link Key}
     * @return this builder
     */
    ColumnVisibilityStep qualifier(final byte[] columnQualifier);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnQualifier the column qualifier to use for the {@link Key}
     * @param offset the offset within the array of the first byte to be read; must be non-negative
     *        and no larger than row.length
     * @param length the number of bytes to be read from the given array; must be non-negative and
     *        no larger than row.length - offset
     * @return this builder
     */
    ColumnVisibilityStep qualifier(final byte[] columnQualifier, int offset, int length);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnQualifier the column qualifier to use for the {@link Key}
     * @return this builder
     */
    ColumnVisibilityStep qualifier(final Text columnQualifier);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnQualifier the column qualifier to use for the {@link Key}. The encoding must be
     *        UTF-8
     * @return this builder
     */
    ColumnVisibilityStep qualifier(final CharSequence columnQualifier);
  }
  /**
   * Builder step used to set the column visibility part of the {@link Key}.
   *
   * @since 2.0
   */
  public interface ColumnVisibilityStep extends Build {
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnVisibility the column visibility to use for the {@link Key}
     * @return this builder
     */
    Build visibility(final byte[] columnVisibility);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnVisibility the column visibility to use for the {@link Key}
     * @param offset the offset within the array of the first byte to be read; must be non-negative
     *        and no larger than row.length
     * @param length the number of bytes to be read from the given array; must be non-negative and
     *        no larger than row.length - offset
     * @return this builder
     */
    Build visibility(final byte[] columnVisibility, int offset, int length);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnVisibility the column visibility to use for the {@link Key}
     * @return this builder
     */
    Build visibility(final Text columnVisibility);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnVisibility the column visibility to use for the {@link Key}. The encoding must
     *        be UTF-8
     * @return this builder
     */
    Build visibility(final CharSequence columnVisibility);
    /**
     * Set the column qualifier of the {@link Key} that this builder will build to the parameter.
     *
     * @param columnVisibility the column visibility to use for the {@link Key}
     * @return this builder
     */
    Build visibility(final ColumnVisibility columnVisibility);
  }
  /**
   * Single implementation backing every step interface. The step interfaces it implements are
   * what enforce the "row first, family before qualifier/visibility" ordering at compile time;
   * this class itself just records the component bytes until build() is called.
   *
   * @since 2.0
   */
  static class KeyBuilderImpl
      implements RowStep, ColumnFamilyStep, ColumnQualifierStep, ColumnVisibilityStep {
    protected static final byte[] EMPTY_BYTES = new byte[0];
    // When true, every byte[] handed to this builder is copied; when false, the caller's array
    // is referenced directly along with its original offset/length (zero-copy mode).
    private final boolean copyBytes;
    // Components not explicitly set default to an empty byte array (see class Javadoc).
    private byte[] row = EMPTY_BYTES;
    private int rowOffset = 0;
    private int rowLength = 0;
    private byte[] family = EMPTY_BYTES;
    private int familyOffset = 0;
    private int familyLength = 0;
    private byte[] qualifier = EMPTY_BYTES;
    private int qualifierOffset = 0;
    private int qualifierLength = 0;
    private byte[] visibility = EMPTY_BYTES;
    private int visibilityOffset = 0;
    private int visibilityLength = 0;
    // Timestamp defaults to Long.MAX_VALUE, matching Key's "most recent" convention.
    private long timestamp = Long.MAX_VALUE;
    private boolean deleted = false;
    KeyBuilderImpl(boolean copyBytes) {
      this.copyBytes = copyBytes;
    }
    // Delegates the copy-vs-reference decision to Key so the behavior matches Key's own
    // constructors exactly.
    private byte[] copyBytesIfNeeded(final byte[] bytes, int offset, int length) {
      return Key.copyIfNeeded(bytes, offset, length, this.copyBytes);
    }
    // CharSequence inputs are always materialized as UTF-8 bytes.
    private byte[] encodeCharSequence(CharSequence chars) {
      return chars.toString().getBytes(UTF_8);
    }
    @Override
    public ColumnFamilyStep row(final byte[] row, int offset, int length) {
      // If the bytes were copied, the copy starts at index 0 and spans the whole new array;
      // otherwise keep the caller's offset/length so the original array can be read in place.
      this.row = copyBytesIfNeeded(row, offset, length);
      this.rowOffset = this.copyBytes ? 0 : offset;
      this.rowLength = this.copyBytes ? this.row.length : length;
      return this;
    }
    @Override
    public ColumnFamilyStep row(final byte[] row) {
      return row(row, 0, row.length);
    }
    @Override
    public ColumnFamilyStep row(final Text row) {
      // Use getLength(), not getBytes().length: a Text's backing array may be oversized.
      return row(row.getBytes(), 0, row.getLength());
    }
    @Override
    public ColumnFamilyStep row(final CharSequence row) {
      return row(encodeCharSequence(row));
    }
    @Override
    public ColumnQualifierStep family(final byte[] family, int offset, int length) {
      // Same copy-vs-reference offset bookkeeping as row(byte[], int, int).
      this.family = copyBytesIfNeeded(family, offset, length);
      this.familyOffset = this.copyBytes ? 0 : offset;
      this.familyLength = this.copyBytes ? this.family.length : length;
      return this;
    }
    @Override
    public ColumnQualifierStep family(final byte[] family) {
      return family(family, 0, family.length);
    }
    @Override
    public ColumnQualifierStep family(Text family) {
      return family(family.getBytes(), 0, family.getLength());
    }
    @Override
    public ColumnQualifierStep family(CharSequence family) {
      return family(encodeCharSequence(family));
    }
    @Override
    public ColumnVisibilityStep qualifier(final byte[] qualifier, int offset, int length) {
      // Same copy-vs-reference offset bookkeeping as row(byte[], int, int).
      this.qualifier = copyBytesIfNeeded(qualifier, offset, length);
      this.qualifierOffset = this.copyBytes ? 0 : offset;
      this.qualifierLength = this.copyBytes ? this.qualifier.length : length;
      return this;
    }
    @Override
    public ColumnVisibilityStep qualifier(final byte[] qualifier) {
      return qualifier(qualifier, 0, qualifier.length);
    }
    @Override
    public ColumnVisibilityStep qualifier(Text qualifier) {
      return qualifier(qualifier.getBytes(), 0, qualifier.getLength());
    }
    @Override
    public ColumnVisibilityStep qualifier(CharSequence qualifier) {
      return qualifier(encodeCharSequence(qualifier));
    }
    @Override
    public Build visibility(final byte[] visibility, int offset, int length) {
      // Same copy-vs-reference offset bookkeeping as row(byte[], int, int).
      this.visibility = copyBytesIfNeeded(visibility, offset, length);
      this.visibilityOffset = this.copyBytes ? 0 : offset;
      this.visibilityLength = this.copyBytes ? this.visibility.length : length;
      return this;
    }
    @Override
    public Build visibility(final byte[] visibility) {
      return visibility(visibility, 0, visibility.length);
    }
    @Override
    public Build visibility(Text visibility) {
      return visibility(visibility.getBytes(), 0, visibility.getLength());
    }
    @Override
    public Build visibility(CharSequence visibility) {
      return visibility(encodeCharSequence(visibility));
    }
    @Override
    public Build visibility(ColumnVisibility visibility) {
      // Use the visibility's serialized expression bytes directly.
      byte[] expr = visibility.getExpression();
      return visibility(expr, 0, expr.length);
    }
    @Override
    public final Build timestamp(long timestamp) {
      this.timestamp = timestamp;
      return this;
    }
    @Override
    public Build deleted(boolean deleted) {
      this.deleted = deleted;
      return this;
    }
    @Override
    public Key build() {
      // Final argument 'false' tells Key not to copy again: any copying this builder was asked
      // to do has already happened in copyBytesIfNeeded.
      return new Key(this.row, this.rowOffset, this.rowLength, this.family, this.familyOffset,
          this.familyLength, this.qualifier, this.qualifierOffset, this.qualifierLength,
          this.visibility, this.visibilityOffset, this.visibilityLength, this.timestamp,
          this.deleted, false);
    }
  }
}
| 9,933 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/Range.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.accumulo.core.dataImpl.thrift.TRange;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
/**
* This class is used to specify a range of Accumulo keys.
*
* @see Key
*/
public class Range implements WritableComparable<Range> {
  private Key start;
  private Key stop;
  private boolean startKeyInclusive;
  private boolean stopKeyInclusive;
  private boolean infiniteStartKey;
  private boolean infiniteStopKey;
  /**
   * Creates a range that goes from negative to positive infinity
   */
  public Range() {
    this((Key) null, true, (Key) null, true);
  }
  /**
   * Creates a range from startKey inclusive to endKey inclusive.
   *
   * @param startKey starting key; set to null for negative infinity
   * @param endKey ending key; set to null for positive infinity
   * @throws IllegalArgumentException if end key is before start key
   */
  public Range(Key startKey, Key endKey) {
    this(startKey, true, endKey, true);
  }
  /**
   * Creates a range that covers an entire row.
   *
   * @param row row to cover; set to null to cover all rows
   */
  public Range(CharSequence row) {
    this(row, true, row, true);
  }
  /**
   * Creates a range that covers an entire row.
   *
   * @param row row to cover; set to null to cover all rows
   */
  public Range(Text row) {
    this(row, true, row, true);
  }
  /**
   * Creates a range from startRow inclusive to endRow inclusive.
   *
   * @param startRow starting row; set to null for negative infinity
   * @param endRow ending row; set to null for positive infinity
   * @throws IllegalArgumentException if end row is before start row
   */
  public Range(Text startRow, Text endRow) {
    this(startRow, true, endRow, true);
  }
  /**
   * Creates a range from startRow inclusive to endRow inclusive.
   *
   * @param startRow starting row; set to null for negative infinity
   * @param endRow ending row; set to null for positive infinity
   * @throws IllegalArgumentException if end row is before start row
   */
  public Range(CharSequence startRow, CharSequence endRow) {
    this(startRow, true, endRow, true);
  }
  /**
   * Creates a range from startRow to endRow.
   *
   * @param startRow starting row; set to null for negative infinity
   * @param startRowInclusive true to include start row, false to skip
   * @param endRow ending row; set to null for positive infinity
   * @param endRowInclusive true to include end row, false to skip
   * @throws IllegalArgumentException if end row is before start row
   */
  public Range(Text startRow, boolean startRowInclusive, Text endRow, boolean endRowInclusive) {
    // Row-level inclusivity is translated into key-level bounds: an exclusive start
    // row becomes the key following that whole row, and an inclusive end row becomes
    // an exclusive bound on the key following that whole row.
    this(
        (startRow == null ? null
            : (startRowInclusive ? new Key(startRow)
                : new Key(startRow).followingKey(PartialKey.ROW))),
        true,
        (endRow == null ? null
            : (endRowInclusive ? new Key(endRow).followingKey(PartialKey.ROW) : new Key(endRow))),
        false);
  }
  /**
   * Creates a range from startRow to endRow.
   *
   * @param startRow starting row; set to null for negative infinity
   * @param startRowInclusive true to include start row, false to skip
   * @param endRow ending row; set to null for positive infinity
   * @param endRowInclusive true to include end row, false to skip
   * @throws IllegalArgumentException if end row is before start row
   */
  public Range(CharSequence startRow, boolean startRowInclusive, CharSequence endRow,
      boolean endRowInclusive) {
    this(startRow == null ? null : new Text(startRow.toString()), startRowInclusive,
        endRow == null ? null : new Text(endRow.toString()), endRowInclusive);
  }
  /**
   * Creates a range from startKey to endKey.
   *
   * @param startKey starting key; set to null for negative infinity
   * @param startKeyInclusive true to include start key, false to skip
   * @param endKey ending key; set to null for positive infinity
   * @param endKeyInclusive true to include end key, false to skip
   * @throws IllegalArgumentException if end key is before start key
   */
  public Range(Key startKey, boolean startKeyInclusive, Key endKey, boolean endKeyInclusive) {
    this.start = startKey;
    this.startKeyInclusive = startKeyInclusive;
    this.infiniteStartKey = startKey == null;
    this.stop = endKey;
    this.stopKeyInclusive = endKeyInclusive;
    this.infiniteStopKey = stop == null;
    // Use the private impl so a subclass override of beforeStartKey is never invoked
    // from a constructor.
    if (!infiniteStartKey && !infiniteStopKey && beforeStartKeyImpl(endKey)) {
      throw new IllegalArgumentException(
          "Start key must be less than end key in range (" + startKey + ", " + endKey + ")");
    }
  }
  /**
   * Copies a range.
   *
   * @param range range to copy
   */
  public Range(Range range) {
    this(range.start, range.startKeyInclusive, range.infiniteStartKey, range.stop,
        range.stopKeyInclusive, range.infiniteStopKey);
  }
  /**
   * Creates a range from start to stop.
   *
   * @param start set this to null when negative infinity is needed
   * @param stop set this to null when infinity is needed
   * @param startKeyInclusive determines if the ranges includes the start key
   * @param stopKeyInclusive determines if the range includes the end key
   * @param infiniteStartKey true if start key is negative infinity (null)
   * @param infiniteStopKey true if stop key is positive infinity (null)
   * @throws IllegalArgumentException if stop is before start, or infiniteStartKey is true but start
   *         is not null, or infiniteStopKey is true but stop is not null
   */
  public Range(Key start, Key stop, boolean startKeyInclusive, boolean stopKeyInclusive,
      boolean infiniteStartKey, boolean infiniteStopKey) {
    this(start, startKeyInclusive, infiniteStartKey, stop, stopKeyInclusive, infiniteStopKey);
    if (!infiniteStartKey && !infiniteStopKey && beforeStartKeyImpl(stop)) {
      throw new IllegalArgumentException(
          "Start key must be less than end key in range (" + start + ", " + stop + ")");
    }
  }
  /**
   * Creates a range from start to stop. Unlike the public six-argument method, this one does not
   * assure that stop is after start, which helps performance in cases where that assurance is
   * already in place.
   *
   * @param start set this to null when negative infinity is needed
   * @param startKeyInclusive determines if the ranges includes the start key
   * @param infiniteStartKey true if start key is negative infinity (null)
   * @param stop set this to null when infinity is needed
   * @param stopKeyInclusive determines if the range includes the end key
   * @param infiniteStopKey true if stop key is positive infinity (null)
   * @throws IllegalArgumentException if infiniteStartKey is true but start is not null, or
   *         infiniteStopKey is true but stop is not null
   */
  protected Range(Key start, boolean startKeyInclusive, boolean infiniteStartKey, Key stop,
      boolean stopKeyInclusive, boolean infiniteStopKey) {
    // An infinite endpoint must be represented by a null key; reject contradictions.
    if (infiniteStartKey && start != null) {
      throw new IllegalArgumentException();
    }
    if (infiniteStopKey && stop != null) {
      throw new IllegalArgumentException();
    }
    this.start = start;
    this.stop = stop;
    this.startKeyInclusive = startKeyInclusive;
    this.stopKeyInclusive = stopKeyInclusive;
    this.infiniteStartKey = infiniteStartKey;
    this.infiniteStopKey = infiniteStopKey;
  }
  /**
   * Creates a range from a Thrift range.
   *
   * @param trange Thrift range
   */
  public Range(TRange trange) {
    this(trange.start == null ? null : new Key(trange.start), trange.startKeyInclusive,
        trange.infiniteStartKey, trange.stop == null ? null : new Key(trange.stop),
        trange.stopKeyInclusive, trange.infiniteStopKey);
    // The protected constructor skips ordering validation, so validate here; use the
    // private impl to avoid calling an overridable method from a constructor.
    if (!infiniteStartKey && !infiniteStopKey && beforeStartKeyImpl(stop)) {
      throw new IllegalArgumentException(
          "Start key must be less than end key in range (" + start + ", " + stop + ")");
    }
  }
  /**
   * Gets the start key, or null if the start is negative infinity.
   *
   * @return start key
   */
  public Key getStartKey() {
    if (infiniteStartKey) {
      return null;
    }
    return start;
  }
  /**
   * Determines if the given key is before the start key of this range.
   *
   * @param key key to check
   * @return true if the given key is before the range, otherwise false
   */
  public boolean beforeStartKey(Key key) {
    return beforeStartKeyImpl(key);
  }
  /**
   * Implements logic of {@link #beforeStartKey(Key)}, but in a private method, so that it can be
   * safely used by constructors if a subclass overrides that {@link #beforeStartKey(Key)}
   */
  private boolean beforeStartKeyImpl(Key key) {
    if (infiniteStartKey) {
      return false;
    }
    if (startKeyInclusive) {
      return key.compareTo(start) < 0;
    }
    // Exclusive start: a key equal to the start key is also "before" the range.
    return key.compareTo(start) <= 0;
  }
  /**
   * Gets the ending key, or null if the end is positive infinity.
   *
   * @return ending key
   */
  public Key getEndKey() {
    if (infiniteStopKey) {
      return null;
    }
    return stop;
  }
  /**
   * Determines if the given key is after the ending key of this range.
   *
   * @param key key to check
   * @return true if the given key is after the range, otherwise false
   */
  public boolean afterEndKey(Key key) {
    if (infiniteStopKey) {
      return false;
    }
    if (stopKeyInclusive) {
      return stop.compareTo(key) < 0;
    }
    // Exclusive stop: a key equal to the stop key is also "after" the range.
    return stop.compareTo(key) <= 0;
  }
  @Override
  public int hashCode() {
    int startHash = infiniteStartKey ? 0 : start.hashCode() + (startKeyInclusive ? 1 : 0);
    int stopHash = infiniteStopKey ? 0 : stop.hashCode() + (stopKeyInclusive ? 1 : 0);
    return startHash + stopHash;
  }
  @Override
  public boolean equals(Object o) {
    if (o instanceof Range) {
      return equals((Range) o);
    }
    return false;
  }
  /**
   * Determines if this range equals another.
   *
   * @param otherRange range to compare
   * @return true if ranges are equals, false otherwise
   * @see #compareTo(Range)
   */
  public boolean equals(Range otherRange) {
    return compareTo(otherRange) == 0;
  }
  /**
   * Compares this range to another range. Compares in order: start key, inclusiveness of start key,
   * end key, inclusiveness of end key. Infinite keys sort first, and non-infinite keys are compared
   * with {@link Key#compareTo(Key)}. Inclusive sorts before non-inclusive.
   *
   * @param o range to compare
   * @return comparison result
   */
  @Override
  public int compareTo(Range o) {
    int comp;
    if (infiniteStartKey) {
      if (o.infiniteStartKey) {
        comp = 0;
      } else {
        comp = -1;
      }
    } else if (o.infiniteStartKey) {
      comp = 1;
    } else {
      comp = start.compareTo(o.start);
      if (comp == 0) {
        if (startKeyInclusive && !o.startKeyInclusive) {
          comp = -1;
        } else if (!startKeyInclusive && o.startKeyInclusive) {
          comp = 1;
        }
      }
    }
    if (comp == 0) {
      if (infiniteStopKey) {
        if (o.infiniteStopKey) {
          comp = 0;
        } else {
          comp = 1;
        }
      } else if (o.infiniteStopKey) {
        comp = -1;
      } else {
        comp = stop.compareTo(o.stop);
        if (comp == 0) {
          if (stopKeyInclusive && !o.stopKeyInclusive) {
            comp = 1;
          } else if (!stopKeyInclusive && o.stopKeyInclusive) {
            comp = -1;
          }
        }
      }
    }
    return comp;
  }
  /**
   * Determines if the given key falls within this range.
   *
   * @param key key to consider
   * @return true if the given key falls within the range, false otherwise
   */
  public boolean contains(Key key) {
    return !beforeStartKey(key) && !afterEndKey(key);
  }
  /**
   * Merges overlapping and adjacent ranges. For example given the following input:
   *
   * <pre>
   * [a,c], (c, d], (g,m), (j,t]
   * </pre>
   *
   * the following ranges would be returned:
   *
   * <pre>
   * [a,d], (g,t]
   * </pre>
   *
   * @param ranges to merge
   * @return list of merged ranges
   */
  public static List<Range> mergeOverlapping(Collection<Range> ranges) {
    if (ranges.isEmpty()) {
      return Collections.emptyList();
    }
    if (ranges.size() == 1) {
      return Collections.singletonList(ranges.iterator().next());
    }
    List<Range> ral = new ArrayList<>(ranges);
    Collections.sort(ral);
    ArrayList<Range> ret = new ArrayList<>(ranges.size());
    Range currentRange = ral.get(0);
    // Track the first range's start inclusivity separately so a merged range keeps
    // the inclusivity of the earliest start key.
    boolean currentStartKeyInclusive = ral.get(0).startKeyInclusive;
    for (int i = 1; i < ral.size(); i++) {
      // because of inclusive switch, equal keys may not be seen
      if (currentRange.infiniteStopKey) {
        // this range has the minimal start key and
        // an infinite end key so it will contain all
        // other ranges
        break;
      }
      Range range = ral.get(i);
      boolean startKeysEqual;
      if (range.infiniteStartKey) {
        // previous start key must be infinite because it is sorted
        assert currentRange.infiniteStartKey;
        startKeysEqual = true;
      } else if (currentRange.infiniteStartKey) {
        startKeysEqual = false;
      } else {
        startKeysEqual = currentRange.start.equals(range.start);
      }
      // Merge when the next range starts inside the current one, shares its start key,
      // or starts exactly at the current (exclusive) stop key making them adjacent.
      if (startKeysEqual || currentRange.contains(range.start) || (!currentRange.stopKeyInclusive
          && range.startKeyInclusive && range.start.equals(currentRange.stop))) {
        int cmp;
        if (range.infiniteStopKey || (cmp = range.stop.compareTo(currentRange.stop)) > 0
            || (cmp == 0 && range.stopKeyInclusive)) {
          currentRange = new Range(currentRange.getStartKey(), currentStartKeyInclusive,
              range.getEndKey(), range.stopKeyInclusive);
        } /* else currentRange contains ral.get(i) */
      } else {
        ret.add(currentRange);
        currentRange = range;
        currentStartKeyInclusive = range.startKeyInclusive;
      }
    }
    ret.add(currentRange);
    return ret;
  }
  /**
   * Creates a range which represents the intersection of this range and the passed in range. The
   * following example will print true.
   *
   * <pre>
   * Range range1 = new Range("a", "f");
   * Range range2 = new Range("c", "n");
   * Range range3 = range1.clip(range2);
   * System.out.println(range3.equals(new Range("c", "f")));
   * </pre>
   *
   * @param range range to clip to
   * @return the intersection of this range and the given range
   * @throws IllegalArgumentException if ranges does not overlap
   */
  public Range clip(Range range) {
    return clip(range, false);
  }
  /**
   * Creates a range which represents the intersection of this range and the passed in range. Unlike
   * {@link #clip(Range)}, this method can optionally return null if the ranges do not overlap,
   * instead of throwing an exception. The returnNullIfDisjoint parameter controls this behavior.
   *
   * @param range range to clip to
   * @param returnNullIfDisjoint true to return null if ranges are disjoint, false to throw an
   *        exception
   * @return the intersection of this range and the given range, or null if ranges do not overlap
   *         and returnNullIfDisjoint is true
   * @throws IllegalArgumentException if ranges does not overlap and returnNullIfDisjoint is false
   * @see Range#clip(Range)
   */
  public Range clip(Range range, boolean returnNullIfDisjoint) {
    // Start from the other range's bounds and tighten them to this range where needed.
    Key sk = range.getStartKey();
    boolean ski = range.isStartKeyInclusive();
    Key ek = range.getEndKey();
    boolean eki = range.isEndKeyInclusive();
    if (range.getStartKey() == null) {
      if (getStartKey() != null) {
        sk = getStartKey();
        ski = isStartKeyInclusive();
      }
    } else if (afterEndKey(range.getStartKey())
        || (getEndKey() != null && range.getStartKey().equals(getEndKey())
            && !(range.isStartKeyInclusive() && isEndKeyInclusive()))) {
      // The other range starts past this range (or touches it without mutual
      // inclusivity), so there is no intersection.
      if (returnNullIfDisjoint) {
        return null;
      }
      throw new IllegalArgumentException("Range " + range + " does not overlap " + this);
    } else if (beforeStartKey(range.getStartKey())) {
      sk = getStartKey();
      ski = isStartKeyInclusive();
    }
    if (range.getEndKey() == null) {
      if (getEndKey() != null) {
        ek = getEndKey();
        eki = isEndKeyInclusive();
      }
    } else if (beforeStartKey(range.getEndKey())
        || (getStartKey() != null && range.getEndKey().equals(getStartKey())
            && !(range.isEndKeyInclusive() && isStartKeyInclusive()))) {
      // The other range ends before this range begins, so there is no intersection.
      if (returnNullIfDisjoint) {
        return null;
      }
      throw new IllegalArgumentException("Range " + range + " does not overlap " + this);
    } else if (afterEndKey(range.getEndKey())) {
      ek = getEndKey();
      eki = isEndKeyInclusive();
    }
    return new Range(sk, ski, ek, eki);
  }
  /**
   * Creates a new range that is bounded by the columns passed in. The start key in the returned
   * range will have a column &gt;= to the minimum column. The end key in the returned range will
   * have a column &lt;= the max column.
   *
   * @param min minimum column
   * @param max maximum column
   * @return a column bounded range
   * @throws IllegalArgumentException if the minimum column compares greater than the maximum column
   */
  public Range bound(Column min, Column max) {
    if (min.compareTo(max) > 0) {
      throw new IllegalArgumentException("min column > max column " + min + " " + max);
    }
    Key sk = getStartKey();
    boolean ski = isStartKeyInclusive();
    if (sk != null) {
      ByteSequence cf = sk.getColumnFamilyData();
      ByteSequence cq = sk.getColumnQualifierData();
      ByteSequence mincf = new ArrayByteSequence(min.columnFamily);
      ByteSequence mincq;
      if (min.columnQualifier != null) {
        mincq = new ArrayByteSequence(min.columnQualifier);
      } else {
        mincq = new ArrayByteSequence(new byte[0]);
      }
      int cmp = cf.compareTo(mincf);
      if (cmp < 0 || (cmp == 0 && cq.compareTo(mincq) < 0)) {
        // The start key's column precedes the minimum column; raise the start key to
        // the first possible key at the minimum column within the same row.
        ski = true;
        sk = new Key(sk.getRowData().toArray(), mincf.toArray(), mincq.toArray(), new byte[0],
            Long.MAX_VALUE, true);
      }
    }
    Key ek = getEndKey();
    boolean eki = isEndKeyInclusive();
    if (ek != null) {
      ByteSequence row = ek.getRowData();
      ByteSequence cf = ek.getColumnFamilyData();
      ByteSequence cq = ek.getColumnQualifierData();
      ByteSequence cv = ek.getColumnVisibilityData();
      ByteSequence maxcf = new ArrayByteSequence(max.columnFamily);
      ByteSequence maxcq = null;
      if (max.columnQualifier != null) {
        maxcq = new ArrayByteSequence(max.columnQualifier);
      }
      boolean set = false;
      int comp = cf.compareTo(maxcf);
      if (comp > 0) {
        set = true;
      } else if (comp == 0 && maxcq != null && cq.compareTo(maxcq) > 0) {
        set = true;
      } else if (!eki && row.length() > 0 && row.byteAt(row.length() - 1) == 0 && cf.length() == 0
          && cq.length() == 0 && cv.length() == 0 && ek.getTimestamp() == Long.MAX_VALUE) {
        // The end key looks like a row boundary produced by followingKey(ROW)
        // (row ends in a 0 byte, everything else empty/max); strip the trailing byte
        // so the maximum column bound applies to the preceding row.
        row = row.subSequence(0, row.length() - 1);
        set = true;
      }
      if (set) {
        eki = false;
        if (maxcq == null) {
          ek = new Key(row.toArray(), maxcf.toArray(), new byte[0], new byte[0], 0, false)
              .followingKey(PartialKey.ROW_COLFAM);
        } else {
          ek = new Key(row.toArray(), maxcf.toArray(), maxcq.toArray(), new byte[0], 0, false)
              .followingKey(PartialKey.ROW_COLFAM_COLQUAL);
        }
      }
    }
    return new Range(sk, ski, ek, eki);
  }
  @Override
  public String toString() {
    return ((startKeyInclusive && start != null) ? "[" : "(") + (start == null ? "-inf" : start)
        + "," + (stop == null ? "+inf" : stop) + ((stopKeyInclusive && stop != null) ? "]" : ")");
  }
  @Override
  public void readFields(DataInput in) throws IOException {
    infiniteStartKey = in.readBoolean();
    infiniteStopKey = in.readBoolean();
    if (infiniteStartKey) {
      start = null;
    } else {
      start = new Key();
      start.readFields(in);
    }
    if (infiniteStopKey) {
      stop = null;
    } else {
      stop = new Key();
      stop.readFields(in);
    }
    startKeyInclusive = in.readBoolean();
    stopKeyInclusive = in.readBoolean();
    // Use the private impl, consistent with the constructors, so validation of a
    // partially-deserialized instance never invokes a subclass's beforeStartKey
    // override.
    if (!infiniteStartKey && !infiniteStopKey && beforeStartKeyImpl(stop)) {
      throw new InvalidObjectException(
          "Start key must be less than end key in range (" + start + ", " + stop + ")");
    }
  }
  @Override
  public void write(DataOutput out) throws IOException {
    // Field order must match readFields exactly.
    out.writeBoolean(infiniteStartKey);
    out.writeBoolean(infiniteStopKey);
    if (!infiniteStartKey) {
      start.write(out);
    }
    if (!infiniteStopKey) {
      stop.write(out);
    }
    out.writeBoolean(startKeyInclusive);
    out.writeBoolean(stopKeyInclusive);
  }
  /**
   * Gets whether the start key of this range is inclusive.
   *
   * @return true if start key is inclusive
   */
  public boolean isStartKeyInclusive() {
    return startKeyInclusive;
  }
  /**
   * Gets whether the end key of this range is inclusive.
   *
   * @return true if end key is inclusive
   */
  public boolean isEndKeyInclusive() {
    return stopKeyInclusive;
  }
  /**
   * Converts this range to Thrift.
   *
   * @return Thrift range
   */
  public TRange toThrift() {
    return new TRange(start == null ? null : start.toThrift(),
        stop == null ? null : stop.toThrift(), startKeyInclusive, stopKeyInclusive,
        infiniteStartKey, infiniteStopKey);
  }
  /**
   * Gets whether the start key is negative infinity.
   *
   * @return true if start key is negative infinity
   */
  public boolean isInfiniteStartKey() {
    return infiniteStartKey;
  }
  /**
   * Gets whether the end key is positive infinity.
   *
   * @return true if end key is positive infinity
   */
  public boolean isInfiniteStopKey() {
    return infiniteStopKey;
  }
  /**
   * Creates a range that covers an exact row. Returns the same Range as {@link #Range(Text)}.
   *
   * @param row row to cover; set to null to cover all rows
   */
  public static Range exact(Text row) {
    return new Range(row);
  }
  /**
   * Creates a range that covers an exact row and column family.
   *
   * @param row row to cover
   * @param cf column family to cover
   */
  public static Range exact(Text row, Text cf) {
    Key startKey = new Key(row, cf);
    return new Range(startKey, true, startKey.followingKey(PartialKey.ROW_COLFAM), false);
  }
  /**
   * Creates a range that covers an exact row, column family, and column qualifier.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   */
  public static Range exact(Text row, Text cf, Text cq) {
    Key startKey = new Key(row, cf, cq);
    return new Range(startKey, true, startKey.followingKey(PartialKey.ROW_COLFAM_COLQUAL), false);
  }
  /**
   * Creates a range that covers an exact row, column family, column qualifier, and column
   * visibility.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @param cv column visibility to cover
   */
  public static Range exact(Text row, Text cf, Text cq, Text cv) {
    Key startKey = new Key(row, cf, cq, cv);
    return new Range(startKey, true, startKey.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS),
        false);
  }
  /**
   * Creates a range that covers an exact row, column family, column qualifier, column visibility,
   * and timestamp.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @param cv column visibility to cover
   * @param ts timestamp to cover
   */
  public static Range exact(Text row, Text cf, Text cq, Text cv, long ts) {
    Key startKey = new Key(row, cf, cq, cv, ts);
    return new Range(startKey, true,
        startKey.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME), false);
  }
  /**
   * Returns a Text that sorts just after all Texts beginning with a prefix.
   *
   * @param prefix to follow
   * @return prefix that immediately follows the given prefix when sorted, or null if no prefix can
   *         follow (i.e., the string is all 0xff bytes)
   */
  public static Text followingPrefix(Text prefix) {
    byte[] prefixBytes = prefix.getBytes();
    // find the last byte in the array that is not 0xff
    int changeIndex = prefix.getLength() - 1;
    while (changeIndex >= 0 && prefixBytes[changeIndex] == (byte) 0xff) {
      changeIndex--;
    }
    if (changeIndex < 0) {
      return null;
    }
    // copy prefix bytes into new array
    byte[] newBytes = new byte[changeIndex + 1];
    System.arraycopy(prefixBytes, 0, newBytes, 0, changeIndex + 1);
    // increment the selected byte
    newBytes[changeIndex]++;
    return new Text(newBytes);
  }
  /**
   * Returns a Range that covers all rows beginning with a prefix.
   *
   * @param rowPrefix prefix of rows to cover
   */
  public static Range prefix(Text rowPrefix) {
    Text fp = followingPrefix(rowPrefix);
    // A null following prefix (all 0xff bytes) means the range extends to +inf.
    return new Range(new Key(rowPrefix), true, fp == null ? null : new Key(fp), false);
  }
  /**
   * Returns a Range that covers all column families beginning with a prefix within a given row.
   *
   * @param row row to cover
   * @param cfPrefix prefix of column families to cover
   */
  public static Range prefix(Text row, Text cfPrefix) {
    Text fp = followingPrefix(cfPrefix);
    return new Range(new Key(row, cfPrefix), true,
        fp == null ? new Key(row).followingKey(PartialKey.ROW) : new Key(row, fp), false);
  }
  /**
   * Returns a Range that covers all column qualifiers beginning with a prefix within a given row
   * and column family.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cqPrefix prefix of column qualifiers to cover
   */
  public static Range prefix(Text row, Text cf, Text cqPrefix) {
    Text fp = followingPrefix(cqPrefix);
    return new Range(new Key(row, cf, cqPrefix), true,
        fp == null ? new Key(row, cf).followingKey(PartialKey.ROW_COLFAM) : new Key(row, cf, fp),
        false);
  }
  /**
   * Returns a Range that covers all column visibilities beginning with a prefix within a given row,
   * column family, and column qualifier.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @param cvPrefix prefix of column visibilities to cover
   */
  public static Range prefix(Text row, Text cf, Text cq, Text cvPrefix) {
    Text fp = followingPrefix(cvPrefix);
    return new Range(new Key(row, cf, cq, cvPrefix), true,
        fp == null ? new Key(row, cf, cq).followingKey(PartialKey.ROW_COLFAM_COLQUAL)
            : new Key(row, cf, cq, fp),
        false);
  }
  /**
   * Creates a range that covers an exact row.
   *
   * @param row row to cover; set to null to cover all rows
   * @see #exact(Text)
   */
  public static Range exact(CharSequence row) {
    return Range.exact(new Text(row.toString()));
  }
  /**
   * Creates a range that covers an exact row and column family.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @see #exact(Text, Text)
   */
  public static Range exact(CharSequence row, CharSequence cf) {
    return Range.exact(new Text(row.toString()), new Text(cf.toString()));
  }
  /**
   * Creates a range that covers an exact row, column family, and column qualifier.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @see #exact(Text, Text, Text)
   */
  public static Range exact(CharSequence row, CharSequence cf, CharSequence cq) {
    return Range.exact(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()));
  }
  /**
   * Creates a range that covers an exact row, column family, column qualifier, and column
   * visibility.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @param cv column visibility to cover
   * @see #exact(Text, Text, Text, Text)
   */
  public static Range exact(CharSequence row, CharSequence cf, CharSequence cq, CharSequence cv) {
    return Range.exact(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()),
        new Text(cv.toString()));
  }
  /**
   * Creates a range that covers an exact row, column family, column qualifier, column visibility,
   * and timestamp.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @param cv column visibility to cover
   * @param ts timestamp to cover
   * @see #exact(Text, Text, Text, Text, long)
   */
  public static Range exact(CharSequence row, CharSequence cf, CharSequence cq, CharSequence cv,
      long ts) {
    return Range.exact(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()),
        new Text(cv.toString()), ts);
  }
  /**
   * Returns a Range that covers all rows beginning with a prefix.
   *
   * @param rowPrefix prefix of rows to cover
   * @see #prefix(Text)
   */
  public static Range prefix(CharSequence rowPrefix) {
    return Range.prefix(new Text(rowPrefix.toString()));
  }
  /**
   * Returns a Range that covers all column families beginning with a prefix within a given row.
   *
   * @param row row to cover
   * @param cfPrefix prefix of column families to cover
   * @see #prefix(Text, Text)
   */
  public static Range prefix(CharSequence row, CharSequence cfPrefix) {
    return Range.prefix(new Text(row.toString()), new Text(cfPrefix.toString()));
  }
  /**
   * Returns a Range that covers all column qualifiers beginning with a prefix within a given row
   * and column family.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cqPrefix prefix of column qualifiers to cover
   * @see #prefix(Text, Text, Text)
   */
  public static Range prefix(CharSequence row, CharSequence cf, CharSequence cqPrefix) {
    return Range.prefix(new Text(row.toString()), new Text(cf.toString()),
        new Text(cqPrefix.toString()));
  }
  /**
   * Returns a Range that covers all column visibilities beginning with a prefix within a given row,
   * column family, and column qualifier.
   *
   * @param row row to cover
   * @param cf column family to cover
   * @param cq column qualifier to cover
   * @param cvPrefix prefix of column visibilities to cover
   * @see #prefix(Text, Text, Text, Text)
   */
  public static Range prefix(CharSequence row, CharSequence cf, CharSequence cq,
      CharSequence cvPrefix) {
    return Range.prefix(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()),
        new Text(cvPrefix.toString()));
  }
}
| 9,934 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/Condition.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static com.google.common.base.Preconditions.checkArgument;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;
/**
* Conditions that must be met on a particular column in a row.
*
* @since 1.6.0
*/
public class Condition {

  private ByteSequence cf;
  private ByteSequence cq;
  private ByteSequence cv;
  private ByteSequence val;
  private Long ts;
  private IteratorSetting[] iterators = new IteratorSetting[0];
  private static final ByteSequence EMPTY = new ArrayByteSequence(new byte[0]);

  /**
   * Creates a new condition. The initial column value and timestamp are null, and the initial
   * column visibility is empty. Characters in the column family and column qualifier are encoded as
   * bytes in the condition using UTF-8.
   *
   * @param cf column family
   * @param cq column qualifier
   * @throws IllegalArgumentException if any argument is null
   */
  public Condition(CharSequence cf, CharSequence cq) {
    checkArgument(cf != null, "cf is null");
    checkArgument(cq != null, "cq is null");
    this.cf = new ArrayByteSequence(cf.toString().getBytes(UTF_8));
    this.cq = new ArrayByteSequence(cq.toString().getBytes(UTF_8));
    this.cv = EMPTY;
  }

  /**
   * Creates a new condition. The initial column value and timestamp are null, and the initial
   * column visibility is empty.
   *
   * @param cf column family
   * @param cq column qualifier
   * @throws IllegalArgumentException if any argument is null
   */
  public Condition(byte[] cf, byte[] cq) {
    checkArgument(cf != null, "cf is null");
    checkArgument(cq != null, "cq is null");
    this.cf = new ArrayByteSequence(cf);
    this.cq = new ArrayByteSequence(cq);
    this.cv = EMPTY;
  }

  /**
   * Creates a new condition. The initial column value and timestamp are null, and the initial
   * column visibility is empty.
   *
   * @param cf column family
   * @param cq column qualifier
   * @throws IllegalArgumentException if any argument is null
   */
  public Condition(Text cf, Text cq) {
    checkArgument(cf != null, "cf is null");
    checkArgument(cq != null, "cq is null");
    // Use (bytes, 0, length) because a Text's backing array may be longer than its content.
    this.cf = new ArrayByteSequence(cf.getBytes(), 0, cf.getLength());
    this.cq = new ArrayByteSequence(cq.getBytes(), 0, cq.getLength());
    this.cv = EMPTY;
  }

  /**
   * Creates a new condition. The initial column value and timestamp are null, and the initial
   * column visibility is empty.
   *
   * @param cf column family
   * @param cq column qualifier
   * @throws IllegalArgumentException if any argument is null
   */
  public Condition(ByteSequence cf, ByteSequence cq) {
    checkArgument(cf != null, "cf is null");
    checkArgument(cq != null, "cq is null");
    this.cf = cf;
    this.cq = cq;
    this.cv = EMPTY;
  }

  /**
   * Gets the column family of this condition.
   *
   * @return column family
   */
  public ByteSequence getFamily() {
    return cf;
  }

  /**
   * Gets the column qualifier of this condition.
   *
   * @return column qualifier
   */
  public ByteSequence getQualifier() {
    return cq;
  }

  /**
   * Sets the version for the column to check. If this is not set then the latest column will be
   * checked, unless iterators do something different.
   *
   * @param ts timestamp
   * @return this condition
   */
  public Condition setTimestamp(long ts) {
    this.ts = ts;
    return this;
  }

  /**
   * Gets the timestamp of this condition.
   *
   * @return timestamp, or null if not set
   */
  public Long getTimestamp() {
    return ts;
  }

  /**
   * This method sets the expected value of a column. In order for the condition to pass the column
   * must exist and have this value. If a value is not set, then the column must be absent for the
   * condition to pass. The passed-in character sequence is encoded as UTF-8. See
   * {@link #setValue(byte[])}.
   *
   * @param value value
   * @return this condition
   * @throws IllegalArgumentException if value is null
   */
  public Condition setValue(CharSequence value) {
    checkArgument(value != null, "value is null");
    this.val = new ArrayByteSequence(value.toString().getBytes(UTF_8));
    return this;
  }

  /**
   * This method sets the expected value of a column. In order for the condition to pass the column
   * must exist and have this value. If a value is not set, then the column must be absent for the
   * condition to pass.
   *
   * @param value value
   * @return this condition
   * @throws IllegalArgumentException if value is null
   */
  public Condition setValue(byte[] value) {
    checkArgument(value != null, "value is null");
    this.val = new ArrayByteSequence(value);
    return this;
  }

  /**
   * This method sets the expected value of a column. In order for the condition to pass the column
   * must exist and have this value. If a value is not set, then the column must be absent for the
   * condition to pass. See {@link #setValue(byte[])}.
   *
   * @param value value
   * @return this condition
   * @throws IllegalArgumentException if value is null
   */
  public Condition setValue(Text value) {
    checkArgument(value != null, "value is null");
    this.val = new ArrayByteSequence(value.getBytes(), 0, value.getLength());
    return this;
  }

  /**
   * This method sets the expected value of a column. In order for the condition to pass the column
   * must exist and have this value. If a value is not set, then the column must be absent for the
   * condition to pass. See {@link #setValue(byte[])}.
   *
   * @param value value
   * @return this condition
   * @throws IllegalArgumentException if value is null
   */
  public Condition setValue(ByteSequence value) {
    checkArgument(value != null, "value is null");
    this.val = value;
    return this;
  }

  /**
   * Gets the value of this condition.
   *
   * @return value, or null if no expected value was set
   */
  public ByteSequence getValue() {
    return val;
  }

  /**
   * Sets the visibility for the column to check. If not set it defaults to empty visibility.
   *
   * @param cv column visibility
   * @return this condition
   * @throws IllegalArgumentException if cv is null
   */
  public Condition setVisibility(ColumnVisibility cv) {
    checkArgument(cv != null, "cv is null");
    this.cv = new ArrayByteSequence(cv.getExpression());
    return this;
  }

  /**
   * Gets the column visibility of this condition.
   *
   * @return column visibility
   */
  public ByteSequence getVisibility() {
    return cv;
  }

  /**
   * Set iterators to use when reading the columns value. These iterators will be applied in
   * addition to the iterators configured for the table. Using iterators its possible to test other
   * conditions, besides equality and absence, like less than. On the server side the iterators will
   * be seeked using a range that covers only the family, qualifier, and visibility (if the
   * timestamp is set then it will be used to narrow the range). Value equality will be tested using
   * the first entry returned by the iterator stack.
   *
   * @param iterators iterators
   * @return this condition
   * @throws IllegalArgumentException if iterators or any of its elements are null, or if any two
   *         iterators share the same name or priority
   */
  public Condition setIterators(IteratorSetting... iterators) {
    checkArgument(iterators != null, "iterators is null");
    // Validate every element, even for zero- or one-element arrays. Previously this
    // validation only ran when more than one iterator was supplied, and a null element was
    // never rejected despite the documented contract (it would surface later as a
    // NullPointerException instead of the promised IllegalArgumentException).
    HashSet<String> names = new HashSet<>();
    HashSet<Integer> prios = new HashSet<>();
    for (IteratorSetting iteratorSetting : iterators) {
      checkArgument(iteratorSetting != null, "an iterator is null");
      if (!names.add(iteratorSetting.getName())) {
        throw new IllegalArgumentException(
            "iterator name used more than once " + iteratorSetting.getName());
      }
      if (!prios.add(iteratorSetting.getPriority())) {
        throw new IllegalArgumentException(
            "iterator priority used more than once " + iteratorSetting.getPriority());
      }
    }
    this.iterators = iterators;
    return this;
  }

  /**
   * Gets the iterators for this condition.
   * <p>
   * NOTE: returns the internal array without copying; callers must not modify it.
   *
   * @return iterators
   */
  public IteratorSetting[] getIterators() {
    return iterators;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof Condition)) {
      return false;
    }
    Condition condition = (Condition) o;
    return Objects.equals(cf, condition.cf) && Objects.equals(cq, condition.cq)
        && Objects.equals(cv, condition.cv) && Objects.equals(val, condition.val)
        && Objects.equals(ts, condition.ts) && Arrays.equals(iterators, condition.iterators);
  }

  @Override
  public int hashCode() {
    // Combine the scalar fields, then fold in element-wise hashing of the iterator array
    // (Objects.hash would use the array's identity hash).
    int result = Objects.hash(cf, cq, cv, val, ts);
    result = 31 * result + Arrays.hashCode(iterators);
    return result;
  }
}
| 9,935 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/Value.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
/**
* A byte sequence that is usable as a key or value. Based on
* {@link org.apache.hadoop.io.BytesWritable} only this class is NOT resizable and DOES NOT
* distinguish between the size of the sequence and the current capacity as
* {@link org.apache.hadoop.io.BytesWritable} does. Hence it is comparatively 'immutable'.
*/
public class Value implements WritableComparable<Object> {
  // Shared zero-length array so the no-arg constructor allocates nothing.
  private static final byte[] EMPTY = new byte[0];
  // Backing bytes; never null. Several constructors/setters share the caller's array directly,
  // so this class is 'immutable' only by convention (see class javadoc).
  protected byte[] value;
  /**
   * Creates a zero-size sequence.
   */
  public Value() {
    this(EMPTY, false);
  }
  /**
   * Creates a value using the UTF-8 encoding of the CharSequence
   *
   * @param cs may not be null
   *
   * @since 1.8.0
   */
  public Value(CharSequence cs) {
    this(cs.toString().getBytes(UTF_8));
  }
  /**
   * Creates a Value using the bytes of the Text. Makes a copy, does not use the byte array from the
   * Text.
   *
   * @param text may not be null
   *
   * @since 1.8.0
   */
  public Value(Text text) {
    // (bytes, 0, length) because Text's backing array may be longer than its content.
    this(text.getBytes(), 0, text.getLength());
  }
  /**
   * Creates a Value using a byte array as the initial value. The given byte array is used directly
   * as the backing array, so later changes made to the array reflect into the new Value.
   *
   * @param bytes May not be null
   */
  public Value(byte[] bytes) {
    this(bytes, false);
  }
  /**
   * Creates a Value using the bytes in a buffer as the initial value. Makes a defensive copy.
   *
   * @param bytes May not be null
   */
  public Value(ByteBuffer bytes) {
    /* TODO ACCUMULO-2509 right now this uses the entire backing array, which must be accessible. */
    this(toBytes(bytes), false);
  }
  /**
   * Creates a Value using a byte array as the initial value.
   *
   * @param bytes may not be null
   * @param copy false to use the given byte array directly as the backing array, true to force a
   *        copy
   */
  public Value(byte[] bytes, boolean copy) {
    requireNonNull(bytes);
    if (copy) {
      this.value = new byte[bytes.length];
      System.arraycopy(bytes, 0, this.value, 0, bytes.length);
    } else {
      // Caller's array is adopted directly; subsequent external mutation is visible here.
      this.value = bytes;
    }
  }
  /**
   * Creates a new Value based on another.
   *
   * @param ibw may not be null.
   */
  public Value(final Value ibw) {
    this(ibw.get(), 0, ibw.getSize());
  }
  /**
   * Creates a Value based on a range in a byte array. A copy of the bytes is always made.
   *
   * @param newData source of copy, may not be null
   * @param offset the offset in newData to start with for value bytes
   * @param length the number of bytes in the value
   * @throws IndexOutOfBoundsException if offset or length are invalid
   */
  public Value(final byte[] newData, final int offset, final int length) {
    requireNonNull(newData);
    this.value = new byte[length];
    System.arraycopy(newData, offset, this.value, 0, length);
  }
  /**
   * Gets the byte data of this value.
   *
   * @return the underlying byte array directly.
   */
  public byte[] get() {
    assert (value != null);
    return this.value;
  }
  /**
   * Sets the byte data of this value. The given byte array is used directly as the backing array,
   * so later changes made to the array reflect into this Value.
   *
   * @param b may not be null
   */
  public void set(final byte[] b) {
    requireNonNull(b);
    this.value = b;
  }
  /**
   * Sets the byte data of this value. The given byte array is copied.
   *
   * @param b may not be null
   */
  public void copy(byte[] b) {
    requireNonNull(b);
    this.value = new byte[b.length];
    System.arraycopy(b, 0, this.value, 0, b.length);
  }
  /**
   * Gets the size of this value.
   *
   * @return size in bytes
   */
  public int getSize() {
    assert (value != null);
    return this.value.length;
  }
  // Wire format: a 4-byte big-endian length followed by that many raw bytes.
  @Override
  public void readFields(final DataInput in) throws IOException {
    this.value = new byte[in.readInt()];
    in.readFully(this.value, 0, this.value.length);
  }
  @Override
  public void write(final DataOutput out) throws IOException {
    out.writeInt(this.value.length);
    out.write(this.value, 0, this.value.length);
  }
  // Below methods copied from BytesWritable
  @Override
  public int hashCode() {
    return WritableComparator.hashBytes(value, this.value.length);
  }
  /**
   * Define the sort order of the BytesWritable.
   *
   * @param right_obj The other bytes writable
   * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
   *         smaller than right.
   */
  @Override
  public int compareTo(Object right_obj) {
    // Unchecked cast: passing a non-Value throws ClassCastException.
    return compareTo(((Value) right_obj).get());
  }
  /**
   * Compares the bytes in this object to the specified byte array
   *
   * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
   *         smaller than right.
   */
  public int compareTo(final byte[] that) {
    // Orders by length first, then byte-wise; this differs from a purely lexicographic
    // comparison when lengths differ.
    int diff = this.value.length - that.length;
    return (diff != 0) ? diff
        : WritableComparator.compareBytes(this.value, 0, this.value.length, that, 0, that.length);
  }
  @Override
  public boolean equals(Object right_obj) {
    // Only ever equal to another Value; for raw byte arrays use contentEquals(byte[]).
    if (right_obj instanceof Value) {
      return compareTo(right_obj) == 0;
    }
    return false;
  }
  /**
   * Compares the bytes in this object to the specified byte array
   *
   * @return true if the contents of this Value is equivalent to the supplied byte array
   * @since 2.0.0
   */
  public boolean contentEquals(byte[] right_obj) {
    return compareTo(right_obj) == 0;
  }
  @Override
  public String toString() {
    // Decodes the raw bytes as UTF-8; non-text payloads will render as replacement chars.
    return new String(get(), UTF_8);
  }
  /**
   * A Comparator optimized for Value.
   */
  public static class Comparator extends WritableComparator {
    // Delegates to BytesWritable.Comparator, which compares the serialized form directly.
    private BytesWritable.Comparator comparator = new BytesWritable.Comparator();
    /** constructor */
    public Comparator() {
      super(Value.class);
    }
    @Override
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      return comparator.compare(b1, s1, l1, b2, s2, l2);
    }
  }
  static { // register this comparator
    // Registers the raw-bytes comparator with Hadoop's WritableComparator registry at
    // class-load time so serialized Values can be compared without deserialization.
    WritableComparator.define(Value.class, new Comparator());
  }
}
| 9,936 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/Key.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.dataImpl.thrift.TKey;
import org.apache.accumulo.core.dataImpl.thrift.TKeyValue;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
/**
* This is the Key used to store and access individual values in Accumulo. A Key is a tuple composed
* of a row, column family, column qualifier, column visibility, timestamp, and delete marker.
* <p>
* Keys are comparable and therefore have a sorted order defined by {@link #compareTo(Key)}.
*/
public class Key implements WritableComparable<Key>, Cloneable {
protected byte[] row;
protected byte[] colFamily;
protected byte[] colQualifier;
protected byte[] colVisibility;
protected long timestamp;
protected boolean deleted;
/**
* Create a {@link Key} builder.
*
* @since 2.0
* @param copyBytes if the bytes of the {@link Key} components should be copied
* @return the builder at the {@link KeyBuilder.RowStep}
*/
public static KeyBuilder.RowStep builder(boolean copyBytes) {
return new KeyBuilder.KeyBuilderImpl(copyBytes);
}
/**
* Create a {@link Key} builder. Using the builder makes it easy to mix types, like {@code String}
* and {@code byte[]}, for different fields. Copy bytes defaults to true.
*
* @since 2.0
* @return the builder at the {@link KeyBuilder.RowStep}
* @see #builder(boolean)
*/
public static KeyBuilder.RowStep builder() {
return new KeyBuilder.KeyBuilderImpl(true);
}
@Override
public boolean equals(Object o) {
if (o instanceof Key) {
return this.equals((Key) o, PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL);
}
return false;
}
private static final byte[] EMPTY_BYTES = new byte[0];
static byte[] copyIfNeeded(byte[] ba, int off, int len, boolean copyData) {
if (len == 0) {
return EMPTY_BYTES;
}
if (!copyData && ba.length == len && off == 0) {
return ba;
}
byte[] copy = new byte[len];
System.arraycopy(ba, off, copy, 0, len);
return copy;
}
  // Central initializer used by every constructor. Each component is materialized via
  // copyIfNeeded, so it is copied only when required (copy flag set, nonzero offset, or a
  // length shorter than the array); zero-length components share EMPTY_BYTES.
  private final void init(byte[] r, int rOff, int rLen, byte[] cf, int cfOff, int cfLen, byte[] cq,
      int cqOff, int cqLen, byte[] cv, int cvOff, int cvLen, long ts, boolean del, boolean copy) {
    row = copyIfNeeded(r, rOff, rLen, copy);
    colFamily = copyIfNeeded(cf, cfOff, cfLen, copy);
    colQualifier = copyIfNeeded(cq, cqOff, cqLen, copy);
    colVisibility = copyIfNeeded(cv, cvOff, cvLen, copy);
    timestamp = ts;
    deleted = del;
  }
/**
* Creates a key with empty row, empty column family, empty column qualifier, empty column
* visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false.
*/
public Key() {
row = EMPTY_BYTES;
colFamily = EMPTY_BYTES;
colQualifier = EMPTY_BYTES;
colVisibility = EMPTY_BYTES;
timestamp = Long.MAX_VALUE;
deleted = false;
}
/**
* Creates a key with the specified row, empty column family, empty column qualifier, empty column
* visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false. This constructor creates
* a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row row ID
* @see #builder()
*/
  public Key(Text row) {
    // Copies the row bytes; all other components empty, timestamp Long.MAX_VALUE, not deleted.
    init(row.getBytes(), 0, row.getLength(), EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0,
        0, Long.MAX_VALUE, false, true);
  }
/**
* Creates a key with the specified row, empty column family, empty column qualifier, empty column
* visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false. This constructor creates
* a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row row ID
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row) {
    // Copies the row bytes; all other components empty, timestamp Long.MAX_VALUE, not deleted.
    init(row, 0, row.length, EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0,
        Long.MAX_VALUE, false, true);
  }
/**
* Creates a key with the specified row, empty column family, empty column qualifier, empty column
* visibility, the specified timestamp, and delete marker false. This constructor creates a copy
* of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row row ID
* @param ts timestamp
* @see #builder()
*/
  public Key(Text row, long ts) {
    // Delegate to Key(Text) for the copy, then override the default timestamp.
    this(row);
    timestamp = ts;
  }
/**
* Creates a key with the specified row, empty column family, empty column qualifier, empty column
* visibility, the specified timestamp, and delete marker false. This constructor creates a copy
* of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row row ID
* @param ts timestamp
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row, long ts) {
    // Delegate to Key(byte[]) for the copy, then override the default timestamp.
    this(row);
    timestamp = ts;
  }
/**
* Creates a key. The delete marker defaults to false. This constructor creates a copy of each
* specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row bytes containing row ID
* @param rOff offset into row where key's row ID begins (inclusive)
* @param rLen length of row ID in row
* @param cf bytes containing column family
* @param cfOff offset into cf where key's column family begins (inclusive)
* @param cfLen length of column family in cf
* @param cq bytes containing column qualifier
* @param cqOff offset into cq where key's column qualifier begins (inclusive)
* @param cqLen length of column qualifier in cq
* @param cv bytes containing column visibility
* @param cvOff offset into cv where key's column visibility begins (inclusive)
* @param cvLen length of column visibility in cv
* @param ts timestamp
* @see #builder()
*/
  public Key(byte[] row, int rOff, int rLen, byte[] cf, int cfOff, int cfLen, byte[] cq, int cqOff,
      int cqLen, byte[] cv, int cvOff, int cvLen, long ts) {
    // Offset/length form; always copies (last arg true), delete marker defaults to false.
    init(row, rOff, rLen, cf, cfOff, cfLen, cq, cqOff, cqLen, cv, cvOff, cvLen, ts, false, true);
  }
/**
* Creates a key.
*
* @param row bytes containing row ID
* @param rOff offset into row where key's row ID begins (inclusive)
* @param rLen length of row ID in row
* @param cf bytes containing column family
* @param cfOff offset into cf where key's column family begins (inclusive)
* @param cfLen length of column family in cf
* @param cq bytes containing column qualifier
* @param cqOff offset into cq where key's column qualifier begins (inclusive)
* @param cqLen length of column qualifier in cq
* @param cv bytes containing column visibility
* @param cvOff offset into cv where key's column visibility begins (inclusive)
* @param cvLen length of column visibility in cv
* @param ts timestamp
* @param deleted delete marker
* @param copy if true, forces copy of byte array values into key
* @see #builder()
*/
  // Package-private: the only constructor exposing both the delete marker and the copy flag
  // with offsets; used by KeyBuilder.
  Key(byte[] row, int rOff, int rLen, byte[] cf, int cfOff, int cfLen, byte[] cq, int cqOff,
      int cqLen, byte[] cv, int cvOff, int cvLen, long ts, boolean deleted, boolean copy) {
    init(row, rOff, rLen, cf, cfOff, cfLen, cq, cqOff, cqLen, cv, cvOff, cvLen, ts, deleted, copy);
  }
/**
* Creates a key. The delete marker defaults to false. This constructor creates a copy of each
* specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row row ID
* @param colFamily column family
* @param colQualifier column qualifier
* @param colVisibility column visibility
* @param timestamp timestamp
* @see #builder()
*/
  public Key(byte[] row, byte[] colFamily, byte[] colQualifier, byte[] colVisibility,
      long timestamp) {
    // Delete marker defaults to false; arrays are copied (last arg true).
    this(row, colFamily, colQualifier, colVisibility, timestamp, false, true);
  }
/**
* Creates a key. This constructor creates a copy of each specified arrays.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @param row row ID
* @param cf column family
* @param cq column qualifier
* @param cv column visibility
* @param ts timestamp
* @param deleted delete marker
* @see #builder()
*/
  public Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted) {
    // Arrays are copied (last arg true); use the copy-flag overload to avoid copying.
    this(row, cf, cq, cv, ts, deleted, true);
  }
/**
* Creates a key.
*
* @param row row ID
* @param cf column family
* @param cq column qualifier
* @param cv column visibility
* @param ts timestamp
* @param deleted delete marker
* @param copy if true, forces copy of byte array values into key
* @see #builder()
*/
  public Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy) {
    // Whole-array form of init; copy=false shares the caller's arrays directly.
    init(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, cv, 0, cv.length, ts, deleted,
        copy);
  }
/**
* Creates a key with the specified row, the specified column family, empty column qualifier,
* empty column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false. This
* constructor creates a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @see #builder()
*/
  public Key(Text row, Text cf) {
    // Copies row and family; qualifier/visibility empty, timestamp Long.MAX_VALUE.
    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), EMPTY_BYTES, 0, 0,
        EMPTY_BYTES, 0, 0, Long.MAX_VALUE, false, true);
  }
/**
* Creates a key with the specified row, the specified column family, empty column qualifier,
* empty column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false. This
* constructor creates a copy of each specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row, byte[] cf) {
    // Copies row and family; qualifier/visibility empty, timestamp Long.MAX_VALUE.
    init(row, 0, row.length, cf, 0, cf.length, EMPTY_BYTES, 0, 0, EMPTY_BYTES, 0, 0, Long.MAX_VALUE,
        false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, empty column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false.
* This constructor creates a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @see #builder()
*/
  public Key(Text row, Text cf, Text cq) {
    // Copies row/family/qualifier; visibility empty, timestamp Long.MAX_VALUE.
    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0,
        cq.getLength(), EMPTY_BYTES, 0, 0, Long.MAX_VALUE, false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, empty column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker false.
* This constructor creates a copy of each specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row, byte[] cf, byte[] cq) {
    // Copies row/family/qualifier; visibility empty, timestamp Long.MAX_VALUE.
    init(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, EMPTY_BYTES, 0, 0, Long.MAX_VALUE,
        false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, the specified column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker
* false. This constructor creates a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @see #builder()
*/
  public Key(Text row, Text cf, Text cq, Text cv) {
    // Copies all four components; timestamp Long.MAX_VALUE, not deleted.
    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0,
        cq.getLength(), cv.getBytes(), 0, cv.getLength(), Long.MAX_VALUE, false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, the specified column visibility, timestamp {@link Long#MAX_VALUE}, and delete marker
* false. This constructor creates a copy of each specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row, byte[] cf, byte[] cq, byte[] cv) {
    // Copies all four components; timestamp Long.MAX_VALUE, not deleted.
    init(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, cv, 0, cv.length, Long.MAX_VALUE,
        false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, empty column visibility, the specified timestamp, and delete marker false. This
* constructor creates a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @see #builder()
*/
  public Key(Text row, Text cf, Text cq, long ts) {
    // Copies row/family/qualifier; visibility empty, caller-supplied timestamp.
    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0,
        cq.getLength(), EMPTY_BYTES, 0, 0, ts, false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, empty column visibility, the specified timestamp, and delete marker false. This
* constructor creates a copy of each specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row, byte[] cf, byte[] cq, long ts) {
    // Copies row/family/qualifier; visibility empty, caller-supplied timestamp.
    init(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, EMPTY_BYTES, 0, 0, ts, false,
        true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, the specified column visibility, the specified timestamp, and delete marker false.
* This constructor creates a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @see #builder()
*/
  public Key(Text row, Text cf, Text cq, Text cv, long ts) {
    // Copies all four components with a caller-supplied timestamp; not deleted.
    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0,
        cq.getLength(), cv.getBytes(), 0, cv.getLength(), ts, false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, the specified column visibility, the specified timestamp, and delete marker false.
* This constructor creates a copy of row.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @see #builder()
*/
  public Key(Text row, Text cf, Text cq, ColumnVisibility cv, long ts) {
    // Uses the visibility's serialized expression bytes as the colVisibility component.
    byte[] expr = cv.getExpression();
    init(row.getBytes(), 0, row.getLength(), cf.getBytes(), 0, cf.getLength(), cq.getBytes(), 0,
        cq.getLength(), expr, 0, expr.length, ts, false, true);
  }
/**
* Creates a key with the specified row, the specified column family, the specified column
* qualifier, the specified column visibility, the specified timestamp, and delete marker false.
* This constructor creates a copy of each specified array.
* <p>
* To avoid copying, use
* {@link Key#Key(byte[] row, byte[] cf, byte[] cq, byte[] cv, long ts, boolean deleted, boolean copy)}
* instead.
*
* @since 1.8.0
* @see #builder()
*/
  public Key(byte[] row, byte[] cf, byte[] cq, ColumnVisibility cv, long ts) {
    // Uses the visibility's serialized expression bytes as the colVisibility component.
    byte[] expr = cv.getExpression();
    init(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, expr, 0, expr.length, ts, false,
        true);
  }
/**
* Converts CharSequence to Text and creates a Key using {@link #Key(Text)}.
*
* @see #builder()
*/
  public Key(CharSequence row) {
    // Converts via Text, then delegates to the Text-based constructor.
    this(new Text(row.toString()));
  }
/**
* Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text)}.
*
* @see #builder()
*/
  public Key(CharSequence row, CharSequence cf) {
    // Converts via Text, then delegates to the Text-based constructor.
    this(new Text(row.toString()), new Text(cf.toString()));
  }
/**
* Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text)}.
*
* @see #builder()
*/
  public Key(CharSequence row, CharSequence cf, CharSequence cq) {
    // Converts via Text, then delegates to the Text-based constructor.
    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()));
  }
/**
* Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,Text)}.
*
* @see #builder()
*/
  public Key(CharSequence row, CharSequence cf, CharSequence cq, CharSequence cv) {
    // Converts via Text, then delegates to the Text-based constructor.
    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()),
        new Text(cv.toString()));
  }
/**
* Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,long)}.
*
* @see #builder()
*/
  public Key(CharSequence row, CharSequence cf, CharSequence cq, long ts) {
    // Converts via Text, then delegates to the Text-based constructor.
    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()), ts);
  }
/**
* Converts CharSequence to Text and creates a Key using {@link #Key(Text,Text,Text,Text,long)}.
*
* @see #builder()
*/
  public Key(CharSequence row, CharSequence cf, CharSequence cq, CharSequence cv, long ts) {
    // Converts via Text, then delegates to the Text-based constructor.
    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()),
        new Text(cv.toString()), ts);
  }
/**
* Converts CharSequence to Text and creates a Key using
* {@link #Key(Text,Text,Text,ColumnVisibility,long)}.
*
* @see #builder()
*/
  public Key(CharSequence row, CharSequence cf, CharSequence cq, ColumnVisibility cv, long ts) {
    // Converts via Text; the visibility contributes its serialized expression bytes.
    this(new Text(row.toString()), new Text(cf.toString()), new Text(cq.toString()),
        new Text(cv.getExpression()), ts);
  }
private byte[] followingArray(byte[] ba) {
byte[] fba = new byte[ba.length + 1];
System.arraycopy(ba, 0, fba, 0, ba.length);
fba[ba.length] = (byte) 0x00;
return fba;
}
  /**
   * Returns a key that will sort immediately after this key.
   *
   * <p>
   * The returned key references this key's internal byte arrays where possible; it does not copy
   * them.
   *
   * @param part PartialKey except {@link PartialKey#ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL}
   */
  public Key followingKey(PartialKey part) {
    Key returnKey = new Key();
    switch (part) {
      case ROW:
        // appending a 0x00 byte produces the immediately-following row
        returnKey.row = followingArray(row);
        break;
      case ROW_COLFAM:
        returnKey.row = row;
        returnKey.colFamily = followingArray(colFamily);
        break;
      case ROW_COLFAM_COLQUAL:
        returnKey.row = row;
        returnKey.colFamily = colFamily;
        returnKey.colQualifier = followingArray(colQualifier);
        break;
      case ROW_COLFAM_COLQUAL_COLVIS:
        // This isn't useful for inserting into accumulo, but may be useful for lookups.
        returnKey.row = row;
        returnKey.colFamily = colFamily;
        returnKey.colQualifier = colQualifier;
        returnKey.colVisibility = followingArray(colVisibility);
        break;
      case ROW_COLFAM_COLQUAL_COLVIS_TIME:
        returnKey.row = row;
        returnKey.colFamily = colFamily;
        returnKey.colQualifier = colQualifier;
        returnKey.colVisibility = colVisibility;
        // timestamps sort descending (see compareTo), so the following timestamp is one less
        returnKey.setTimestamp(timestamp - 1);
        returnKey.deleted = false;
        break;
      default:
        throw new IllegalArgumentException("Partial key specification " + part + " disallowed");
    }
    return returnKey;
  }
  /**
   * Creates a key with the same row, column family, column qualifier, column visibility, timestamp,
   * and delete marker as the given key. The underlying byte arrays are shared with the other key,
   * not copied (see {@link #set(Key)}).
   */
  public Key(Key other) {
    set(other);
  }
  /**
   * Creates a key from Thrift. The delete marker defaults to false, since the Thrift key does not
   * carry one.
   *
   * @param tkey Thrift key
   * @throws IllegalArgumentException if the Thrift row, column family, column qualifier, or column
   *         visibility buffer is null
   */
  public Key(TKey tkey) {
    this.row = toBytes(tkey.row);
    this.colFamily = toBytes(tkey.colFamily);
    this.colQualifier = toBytes(tkey.colQualifier);
    this.colVisibility = toBytes(tkey.colVisibility);
    this.timestamp = tkey.timestamp;
    this.deleted = false;
    // validate after conversion; a null Thrift buffer converts to a null array
    if (row == null) {
      throw new IllegalArgumentException("null row");
    }
    if (colFamily == null) {
      throw new IllegalArgumentException("null column family");
    }
    if (colQualifier == null) {
      throw new IllegalArgumentException("null column qualifier");
    }
    if (colVisibility == null) {
      throw new IllegalArgumentException("null column visibility");
    }
  }
  /**
   * Writes the row ID into the given <code>Text</code>. This method gives users control over
   * allocation of Text objects by copying into the passed in text.
   *
   * @param r <code>Text</code> object to copy into
   * @return the <code>Text</code> that was passed in
   */
  public Text getRow(Text r) {
    r.set(row, 0, row.length);
    return r;
  }

  /**
   * Returns the row ID as a byte sequence. This method returns a pointer to the key's internal data
   * and does not copy it.
   *
   * @return ByteSequence that points to the internal key row ID data
   */
  public ByteSequence getRowData() {
    return new ArrayByteSequence(row);
  }

  /**
   * Gets the row ID as a <code>Text</code> object. Allocates a new Text on every call.
   *
   * @return Text containing the row ID
   */
  public Text getRow() {
    return getRow(new Text());
  }

  /**
   * Compares this key's row ID with another.
   *
   * @param r row ID to compare
   * @return same as {@link #getRow()}.compareTo(r)
   */
  public int compareRow(Text r) {
    // unsigned lexicographic byte comparison, consistent with key sort order
    return WritableComparator.compareBytes(row, 0, row.length, r.getBytes(), 0, r.getLength());
  }
  /**
   * Returns the column family as a byte sequence. This method returns a pointer to the key's
   * internal data and does not copy it.
   *
   * @return ByteSequence that points to the internal key column family data
   */
  public ByteSequence getColumnFamilyData() {
    return new ArrayByteSequence(colFamily);
  }

  /**
   * Writes the column family into the given <code>Text</code>. This method gives users control over
   * allocation of Text objects by copying into the passed in text.
   *
   * @param cf <code>Text</code> object to copy into
   * @return the <code>Text</code> that was passed in
   */
  public Text getColumnFamily(Text cf) {
    cf.set(colFamily, 0, colFamily.length);
    return cf;
  }

  /**
   * Gets the column family as a <code>Text</code> object. Allocates a new Text on every call.
   *
   * @return Text containing the column family
   */
  public Text getColumnFamily() {
    return getColumnFamily(new Text());
  }

  /**
   * Compares this key's column family with another.
   *
   * @param cf column family to compare
   * @return same as {@link #getColumnFamily()}.compareTo(cf)
   */
  public int compareColumnFamily(Text cf) {
    // unsigned lexicographic byte comparison, consistent with key sort order
    return WritableComparator.compareBytes(colFamily, 0, colFamily.length, cf.getBytes(), 0,
        cf.getLength());
  }
  /**
   * Returns the column qualifier as a byte sequence. This method returns a pointer to the key's
   * internal data and does not copy it.
   *
   * @return ByteSequence that points to the internal key column qualifier data
   */
  public ByteSequence getColumnQualifierData() {
    return new ArrayByteSequence(colQualifier);
  }

  /**
   * Writes the column qualifier into the given <code>Text</code>. This method gives users control
   * over allocation of Text objects by copying into the passed in text.
   *
   * @param cq <code>Text</code> object to copy into
   * @return the <code>Text</code> that was passed in
   */
  public Text getColumnQualifier(Text cq) {
    cq.set(colQualifier, 0, colQualifier.length);
    return cq;
  }

  /**
   * Gets the column qualifier as a <code>Text</code> object. Allocates a new Text on every call.
   *
   * @return Text containing the column qualifier
   */
  public Text getColumnQualifier() {
    return getColumnQualifier(new Text());
  }

  /**
   * Compares this key's column qualifier with another.
   *
   * @param cq column qualifier to compare
   * @return same as {@link #getColumnQualifier()}.compareTo(cq)
   */
  public int compareColumnQualifier(Text cq) {
    // unsigned lexicographic byte comparison, consistent with key sort order
    return WritableComparator.compareBytes(colQualifier, 0, colQualifier.length, cq.getBytes(), 0,
        cq.getLength());
  }
  /**
   * Sets the timestamp.
   *
   * @param ts timestamp
   */
  public void setTimestamp(long ts) {
    this.timestamp = ts;
  }

  /**
   * Gets the timestamp.
   *
   * @return timestamp
   */
  public long getTimestamp() {
    return timestamp;
  }

  /**
   * Determines if this key is deleted (i.e., has a delete marker = true).
   *
   * @return true if key is deleted, false if not
   */
  public boolean isDeleted() {
    return deleted;
  }

  /**
   * Sets the delete marker on this key.
   *
   * @param deleted delete marker (true to delete)
   */
  public void setDeleted(boolean deleted) {
    this.deleted = deleted;
  }
  /**
   * Returns the column visibility as a byte sequence. This method returns a pointer to the key's
   * internal data and does not copy it.
   *
   * @return ByteSequence that points to the internal key column visibility data
   */
  public ByteSequence getColumnVisibilityData() {
    return new ArrayByteSequence(colVisibility);
  }

  /**
   * Gets the column visibility as a <code>Text</code> object. Allocates a new Text on every call.
   *
   * @return Text containing the column visibility
   */
  public final Text getColumnVisibility() {
    return getColumnVisibility(new Text());
  }

  /**
   * Writes the column visibility into the given <code>Text</code>. This method gives users control
   * over allocation of Text objects by copying into the passed in text.
   *
   * @param cv <code>Text</code> object to copy into
   * @return the <code>Text</code> that was passed in
   */
  public final Text getColumnVisibility(Text cv) {
    cv.set(colVisibility, 0, colVisibility.length);
    return cv;
  }

  /**
   * Gets the column visibility. <b>WARNING:</b> using this method may inhibit performance since a
   * new ColumnVisibility object is created on every call.
   *
   * @return ColumnVisibility representing the column visibility
   * @since 1.5.0
   */
  public final ColumnVisibility getColumnVisibilityParsed() {
    return new ColumnVisibility(colVisibility);
  }
  /**
   * Sets this key's row, column family, column qualifier, column visibility, timestamp, and delete
   * marker to be the same as another key's. This method does not copy data from the other key, but
   * only references to it.
   *
   * @param k key to set from
   */
  public void set(Key k) {
    // shallow assignment: both keys share the same byte arrays afterwards
    row = k.row;
    colFamily = k.colFamily;
    colQualifier = k.colQualifier;
    colVisibility = k.colVisibility;
    timestamp = k.timestamp;
    deleted = k.deleted;
  }
  /**
   * Deserializes this key from the legacy wire format written by {@link #write(DataOutput)}: three
   * cumulative field offsets, the total length, the concatenated field bytes, the timestamp, and
   * the delete flag.
   */
  @Override
  public void readFields(DataInput in) throws IOException {
    // this method is a little screwy so it will be compatible with older
    // code that serialized data
    int colFamilyOffset = WritableUtils.readVInt(in);
    int colQualifierOffset = WritableUtils.readVInt(in);
    int colVisibilityOffset = WritableUtils.readVInt(in);
    int totalLen = WritableUtils.readVInt(in);

    // offsets are cumulative positions within the concatenated field bytes,
    // so each field length is the difference between adjacent offsets
    row = new byte[colFamilyOffset];
    colFamily = new byte[colQualifierOffset - colFamilyOffset];
    colQualifier = new byte[colVisibilityOffset - colQualifierOffset];
    colVisibility = new byte[totalLen - colVisibilityOffset];

    in.readFully(row);
    in.readFully(colFamily);
    in.readFully(colQualifier);
    in.readFully(colVisibility);

    timestamp = WritableUtils.readVLong(in);
    deleted = in.readBoolean();
  }
  /**
   * Serializes this key in the wire format expected by {@link #readFields(DataInput)}: cumulative
   * field offsets, total length, concatenated field bytes, timestamp, and delete flag.
   */
  @Override
  public void write(DataOutput out) throws IOException {
    // cumulative offsets of each field within the concatenated field bytes
    int colFamilyOffset = row.length;
    int colQualifierOffset = colFamilyOffset + colFamily.length;
    int colVisibilityOffset = colQualifierOffset + colQualifier.length;
    int totalLen = colVisibilityOffset + colVisibility.length;

    WritableUtils.writeVInt(out, colFamilyOffset);
    WritableUtils.writeVInt(out, colQualifierOffset);
    WritableUtils.writeVInt(out, colVisibilityOffset);
    WritableUtils.writeVInt(out, totalLen);

    out.write(row);
    out.write(colFamily);
    out.write(colQualifier);
    out.write(colVisibility);

    WritableUtils.writeVLong(out, timestamp);
    out.writeBoolean(deleted);
  }
  /**
   * Compares part of a key. For example, compares just the row and column family, and if those are
   * equal then return true.
   *
   * @param other key to compare to
   * @param part part of key to compare
   * @return true if specified parts of keys match, false otherwise
   * @throws IllegalArgumentException if the partial key specification is not recognized
   */
  public boolean equals(Key other, PartialKey part) {
    // each case compares exactly the fields covered by the given PartialKey
    switch (part) {
      case ROW:
        return isEqual(row, other.row);
      case ROW_COLFAM:
        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily);
      case ROW_COLFAM_COLQUAL:
        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily)
            && isEqual(colQualifier, other.colQualifier);
      case ROW_COLFAM_COLQUAL_COLVIS:
        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily)
            && isEqual(colQualifier, other.colQualifier)
            && isEqual(colVisibility, other.colVisibility);
      case ROW_COLFAM_COLQUAL_COLVIS_TIME:
        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily)
            && isEqual(colQualifier, other.colQualifier)
            && isEqual(colVisibility, other.colVisibility) && timestamp == other.timestamp;
      case ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL:
        return isEqual(row, other.row) && isEqual(colFamily, other.colFamily)
            && isEqual(colQualifier, other.colQualifier)
            && isEqual(colVisibility, other.colVisibility) && timestamp == other.timestamp
            && deleted == other.deleted;
      default:
        throw new IllegalArgumentException("Unrecognized partial key specification " + part);
    }
  }
  /**
   * Compares elements of a key given by a {@link PartialKey}. The corresponding elements (row,
   * column family, column qualifier, column visibility, timestamp, and delete marker) are compared
   * in order until unequal elements are found. The row, column family, column qualifier, and column
   * visibility are compared lexicographically and sorted ascending. The timestamps are compared
   * numerically and sorted descending so that the most recent data comes first. Lastly, a delete
   * marker of true sorts before a delete marker of false. The result of the first unequal
   * comparison is returned.
   *
   * For example, for {@link PartialKey#ROW_COLFAM}, this method compares just the row and column
   * family. If the row IDs are not equal, return the result of the row comparison; otherwise,
   * returns the result of the column family comparison.
   *
   * @param other key to compare to
   * @param part part of key to compare
   * @return comparison result
   * @see #compareTo(Key)
   */
  public int compareTo(Key other, PartialKey part) {
    // check for matching row
    int result =
        WritableComparator.compareBytes(row, 0, row.length, other.row, 0, other.row.length);
    if (result != 0 || part.equals(PartialKey.ROW)) {
      return result;
    }
    // check for matching column family
    result = WritableComparator.compareBytes(colFamily, 0, colFamily.length, other.colFamily, 0,
        other.colFamily.length);
    if (result != 0 || part.equals(PartialKey.ROW_COLFAM)) {
      return result;
    }
    // check for matching column qualifier
    result = WritableComparator.compareBytes(colQualifier, 0, colQualifier.length,
        other.colQualifier, 0, other.colQualifier.length);
    if (result != 0 || part.equals(PartialKey.ROW_COLFAM_COLQUAL)) {
      return result;
    }
    // check for matching column visibility
    result = WritableComparator.compareBytes(colVisibility, 0, colVisibility.length,
        other.colVisibility, 0, other.colVisibility.length);
    if (result != 0 || part.equals(PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
      return result;
    }
    // check for matching timestamp; operands are reversed so newer timestamps sort first
    result = Long.compare(other.timestamp, timestamp);
    if (result != 0 || part.equals(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME)) {
      return result;
    }
    // check for matching deleted flag; a deleted key sorts before a non-deleted one
    if (deleted) {
      result = other.deleted ? 0 : -1;
    } else {
      result = other.deleted ? 1 : 0;
    }
    return result;
  }
  /**
   * Compares all components of this key with another, as described in
   * {@link #compareTo(Key, PartialKey)}.
   */
  @Override
  public int compareTo(Key other) {
    return compareTo(other, PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL);
  }
  /**
   * Hash of the row, column family, column qualifier, column visibility, and timestamp. The
   * timestamp is folded into an int by xor-ing its halves; the delete flag is not included.
   */
  @Override
  public int hashCode() {
    return WritableComparator.hashBytes(row, row.length)
        + WritableComparator.hashBytes(colFamily, colFamily.length)
        + WritableComparator.hashBytes(colQualifier, colQualifier.length)
        + WritableComparator.hashBytes(colVisibility, colVisibility.length)
        + (int) (timestamp ^ (timestamp >>> 32));
  }
  /**
   * Returns an ASCII printable string form of the given byte array, treating the bytes as ASCII
   * characters. See {@link #appendPrintableString(byte[], int, int, int, StringBuilder)} for
   * caveats.
   *
   * @param ba byte array
   * @param offset offset to start with in byte array (inclusive)
   * @param len number of bytes to print
   * @param maxLen maximum number of bytes to convert to printable form
   * @return printable string
   * @see #appendPrintableString(byte[], int, int, int, StringBuilder)
   */
  public static String toPrintableString(byte[] ba, int offset, int len, int maxLen) {
    return appendPrintableString(ba, offset, len, maxLen, new StringBuilder()).toString();
  }
/**
* Appends ASCII printable characters to a string, based on the given byte array, treating the
* bytes as ASCII characters. If a byte can be converted to a ASCII printable character it is
* appended as is; otherwise, it is appended as a character code, e.g., %05; for byte value 5. If
* len > maxlen, the string includes a "TRUNCATED" note at the end.
*
* @param ba byte array
* @param offset offset to start with in byte array (inclusive)
* @param len number of bytes to print
* @param maxLen maximum number of bytes to convert to printable form
* @param sb <code>StringBuilder</code> to append to
* @return given <code>StringBuilder</code>
*/
public static StringBuilder appendPrintableString(byte[] ba, int offset, int len, int maxLen,
StringBuilder sb) {
int plen = Math.min(len, maxLen);
for (int i = 0; i < plen; i++) {
int c = 0xff & ba[offset + i];
if (c >= 32 && c <= 126) {
sb.append((char) c);
} else {
sb.append("%" + String.format("%02x;", c));
}
}
if (len > maxLen) {
sb.append("... TRUNCATED");
}
return sb;
}
  // Builds the "row cf:cq [cv]" portion of toString(), truncating each component to the
  // configured default maximum.
  private StringBuilder rowColumnStringBuilder() {
    return rowColumnStringBuilder(Constants.MAX_DATA_TO_PRINT);
  }

  // Builds the "row cf:cq [cv]" portion of toString(), truncating each component to
  // maxComponentLength bytes.
  private StringBuilder rowColumnStringBuilder(int maxComponentLength) {
    StringBuilder sb = new StringBuilder();
    appendPrintableString(row, 0, row.length, maxComponentLength, sb);
    sb.append(" ");
    appendPrintableString(colFamily, 0, colFamily.length, maxComponentLength, sb);
    sb.append(":");
    appendPrintableString(colQualifier, 0, colQualifier.length, maxComponentLength, sb);
    sb.append(" [");
    appendPrintableString(colVisibility, 0, colVisibility.length, maxComponentLength, sb);
    sb.append("]");
    return sb;
  }
  /**
   * Returns this key as {@code "row cf:cq [cv] timestamp deleted"}, with each byte component
   * rendered in printable form and truncated to the default maximum length.
   */
  @Override
  public String toString() {
    StringBuilder sb = rowColumnStringBuilder();
    sb.append(" ");
    sb.append(timestamp);
    sb.append(" ");
    sb.append(deleted);
    return sb.toString();
  }
  /**
   * Stringify this {@link Key}, avoiding truncation of each component, only limiting each component
   * to a length of {@link Integer#MAX_VALUE}
   *
   * @since 1.7.0
   */
  public String toStringNoTruncate() {
    StringBuilder sb = rowColumnStringBuilder(Integer.MAX_VALUE);
    sb.append(" ");
    sb.append(timestamp);
    sb.append(" ");
    sb.append(deleted);
    return sb.toString();
  }
  /**
   * Converts this key to a string, not including timestamp or delete marker. Components are
   * truncated to the default maximum length.
   *
   * @return string form of key
   */
  public String toStringNoTime() {
    return rowColumnStringBuilder().toString();
  }
  /**
   * Returns the sums of the lengths of the row, column family, column qualifier, and column
   * visibility. The timestamp and delete marker are not included.
   *
   * @return sum of key field lengths
   */
  public int getLength() {
    return row.length + colFamily.length + colQualifier.length + colVisibility.length;
  }

  /**
   * Same as {@link #getLength()}.
   *
   * @return sum of key field lengths
   */
  public int getSize() {
    return getLength();
  }
  /**
   * Equality check for two byte arrays, optimized for sorted data: the trailing bytes are compared
   * first, since that is where sorted arrays are most likely to differ.
   */
  private static boolean isEqual(byte[] a1, byte[] a2) {
    if (a1 == a2) {
      return true;
    }
    int last = a1.length;
    if (last != a2.length) {
      return false;
    }
    if (last == 0) {
      return true;
    }
    // since sorted data is usually compared in accumulo,
    // the prefixes will normally be the same... so compare
    // the last two characters first.. the most likely place
    // to have disorder is at end of the strings when the
    // data is sorted... if those are the same compare the rest
    // of the data forward... comparing backwards is slower
    // (compiler and cpu optimized for reading data forward)..
    // do not want slower comparisons when data is equal...
    // sorting brings equals data together
    last--;
    if (a1[last] == a2[last]) {
      for (int i = 0; i < last; i++) {
        if (a1[i] != a2[i]) {
          return false;
        }
      }
    } else {
      return false;
    }
    return true;
  }
  /**
   * Compresses a list of key/value pairs before sending them via thrift. Any key field (row,
   * column family, column qualifier, column visibility) that is identical to the same field of the
   * previous key in the list is set to null in the Thrift key; {@link #decompress(List)} restores
   * it on the receiving side.
   *
   * @param param list of key/value pairs
   * @return list of Thrift key/value pairs
   */
  public static List<TKeyValue> compress(List<? extends KeyValue> param) {
    List<TKeyValue> tkvl = Arrays.asList(new TKeyValue[param.size()]);

    // the first entry is always sent in full, since there is no predecessor to delta against
    if (!param.isEmpty()) {
      tkvl.set(0, new TKeyValue(param.get(0).getKey().toThrift(),
          ByteBuffer.wrap(param.get(0).getValue().get())));
    }

    // iterate backwards so each key is compared against its still-unmodified predecessor
    for (int i = param.size() - 1; i > 0; i--) {
      Key prevKey = param.get(i - 1).getKey();
      KeyValue kv = param.get(i);
      Key key = kv.getKey();
      TKey newKey = null;

      if (isEqual(prevKey.row, key.row)) {
        newKey = key.toThrift();
        newKey.row = null;
      }

      if (isEqual(prevKey.colFamily, key.colFamily)) {
        if (newKey == null) {
          newKey = key.toThrift();
        }
        newKey.colFamily = null;
      }

      if (isEqual(prevKey.colQualifier, key.colQualifier)) {
        if (newKey == null) {
          newKey = key.toThrift();
        }
        newKey.colQualifier = null;
      }

      if (isEqual(prevKey.colVisibility, key.colVisibility)) {
        if (newKey == null) {
          newKey = key.toThrift();
        }
        newKey.colVisibility = null;
      }

      if (newKey == null) {
        newKey = key.toThrift();
      }

      tkvl.set(i, new TKeyValue(newKey, ByteBuffer.wrap(kv.getValue().get())));
    }

    return tkvl;
  }
  /**
   * Decompresses a list of key/value pairs received from thrift. Decompression occurs in place, in
   * the list: each null key field is replaced with the corresponding field of the previous
   * (already decompressed) key, undoing {@link #compress(List)}.
   *
   * @param param list of Thrift key/value pairs
   */
  public static void decompress(List<TKeyValue> param) {
    // forward iteration, so each predecessor is fully restored before it is read
    for (int i = 1; i < param.size(); i++) {
      TKey prevKey = param.get(i - 1).key;
      TKey key = param.get(i).key;
      if (key.row == null) {
        key.row = prevKey.row;
      }
      if (key.colFamily == null) {
        key.colFamily = prevKey.colFamily;
      }
      if (key.colQualifier == null) {
        key.colQualifier = prevKey.colQualifier;
      }
      if (key.colVisibility == null) {
        key.colVisibility = prevKey.colVisibility;
      }
    }
  }
  /**
   * Gets the row ID as a byte array. Package-private; returns the internal array without copying.
   *
   * @return row ID
   */
  byte[] getRowBytes() {
    return row;
  }

  /**
   * Gets the column family as a byte array. Package-private; returns the internal array without
   * copying.
   *
   * @return column family
   */
  byte[] getColFamily() {
    return colFamily;
  }

  /**
   * Gets the column qualifier as a byte array. Package-private; returns the internal array without
   * copying.
   *
   * @return column qualifier
   */
  byte[] getColQualifier() {
    return colQualifier;
  }

  /**
   * Gets the column visibility as a byte array. Package-private; returns the internal array
   * without copying.
   *
   * @return column visibility
   */
  byte[] getColVisibility() {
    return colVisibility;
  }
  /**
   * Converts this key to Thrift. The Thrift buffers wrap this key's internal byte arrays without
   * copying them; the delete marker is not represented.
   *
   * @return Thrift key
   */
  public TKey toThrift() {
    return new TKey(ByteBuffer.wrap(row), ByteBuffer.wrap(colFamily), ByteBuffer.wrap(colQualifier),
        ByteBuffer.wrap(colVisibility), timestamp);
  }
  /**
   * Returns a deep copy of this key: each internal byte array is copied, so mutations of the clone
   * cannot be observed through the original and vice versa.
   */
  @Override
  public Object clone() throws CloneNotSupportedException {
    Key r = (Key) super.clone();
    r.row = Arrays.copyOf(row, row.length);
    r.colFamily = Arrays.copyOf(colFamily, colFamily.length);
    r.colQualifier = Arrays.copyOf(colQualifier, colQualifier.length);
    r.colVisibility = Arrays.copyOf(colVisibility, colVisibility.length);
    return r;
  }
}
| 9,937 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import org.apache.hadoop.io.Text;
/**
* A TabletId provides the information needed to uniquely identify a tablet.
*
* @since 1.7.0
*/
public interface TabletId extends Comparable<TabletId> {

  /**
   * @return the ID of the table this tablet belongs to
   * @since 2.1.0
   */
  TableId getTable();

  /**
   * @return the inclusive upper bound of this tablet's row range (see {@link #toRange()})
   */
  Text getEndRow();

  /**
   * @return the exclusive lower bound of this tablet's row range, i.e. the previous tablet's end
   *         row (see {@link #toRange()})
   */
  Text getPrevEndRow();

  /**
   * @return a range based on the row range of the tablet. The range will cover
   *         {@code (<prev end row>, <end row>]}.
   * @since 1.8.0
   */
  Range toRange();
}
| 9,938 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
/**
* A strongly typed representation of a table ID. This class cannot be used to get a table ID from a
* table name, but does provide the table ID string wrapped with a stronger type. The constructor
* for this class will throw an error if the canonical parameter is null.
*
* @since 2.0.0
*/
public class TableId extends AbstractId<TableId> {
  private static final long serialVersionUID = 1L;

  // cache is for canonicalization/deduplication of created objects,
  // to limit the number of TableId objects in the JVM at any given moment
  // WeakReferences are used because we don't need them to stick around any longer than they need to
  static final Cache<String,TableId> cache = Caffeine.newBuilder().weakValues().build();

  private TableId(final String canonical) {
    super(canonical);
  }

  /**
   * Get a TableId object for the provided canonical string. This is guaranteed to be non-null.
   * Equal canonical strings may yield the same cached instance.
   *
   * @param canonical table ID string
   * @return TableId object
   */
  public static TableId of(final String canonical) {
    return cache.get(canonical, k -> new TableId(canonical));
  }
}
| 9,939 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data;
import java.nio.ByteBuffer;
import java.util.AbstractMap.SimpleImmutableEntry;
/**
* A key/value pair. The key and value may not be set after construction.
*/
public class KeyValue extends SimpleImmutableEntry<Key,Value> {
  private static final long serialVersionUID = 1L;

  /**
   * Creates a new key/value pair.
   *
   * @param key key
   * @param value bytes of value; the {@code false} flag passed to the Value constructor
   *        presumably avoids copying the array — confirm against Value(byte[], boolean)
   */
  public KeyValue(Key key, byte[] value) {
    super(key, new Value(value, false));
  }

  /**
   * Creates a new key/value pair.
   *
   * @param key key
   * @param value buffer containing bytes of value
   */
  public KeyValue(Key key, ByteBuffer value) {
    super(key, new Value(value));
  }

  /**
   * Creates a new key/value pair.
   *
   * @param key key
   * @param value buffer containing bytes of value
   */
  public KeyValue(Key key, Value value) {
    super(key, value);
  }
}
| 9,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data.constraints;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
/**
* A constraints that limits the size of keys to 1mb.
*
* @since 2.1.0 moved from org.apache.accumulo.core.constraints package
*/
public class DefaultKeySizeConstraint implements Constraint {

  protected static final short MAX__KEY_SIZE_EXCEEDED_VIOLATION = 1;
  protected static final long maxSize = 1048576; // 1MB default size

  // Fixed: this was previously a shared mutable ArrayList handed directly to callers,
  // so any caller that modified the returned list would corrupt results for everyone.
  // Collections.emptyList() is immutable and allocation-free.
  static final List<Short> NO_VIOLATIONS = Collections.emptyList();

  /**
   * @param violationCode code returned by {@link #check(Environment, Mutation)}
   * @return human-readable description, or null for unknown codes
   */
  @Override
  public String getViolationDescription(short violationCode) {
    switch (violationCode) {
      case MAX__KEY_SIZE_EXCEEDED_VIOLATION:
        return "Key was larger than 1MB";
    }
    return null;
  }

  /**
   * Checks each column update of the mutation; reports a violation for every key whose combined
   * row/family/qualifier/visibility length exceeds {@code maxSize}.
   */
  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    // fast size check: if the whole mutation is under the limit, no single key can exceed it
    if (mutation.numBytes() < maxSize) {
      return NO_VIOLATIONS;
    }

    List<Short> violations = new ArrayList<>();

    for (ColumnUpdate cu : mutation.getUpdates()) {
      int size = mutation.getRow().length;
      size += cu.getColumnFamily().length;
      size += cu.getColumnQualifier().length;
      size += cu.getColumnVisibility().length;

      if (size > maxSize) {
        violations.add(MAX__KEY_SIZE_EXCEEDED_VIOLATION);
      }
    }

    return violations;
  }
}
| 9,941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data.constraints;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.VisibilityEvaluator;
import org.apache.accumulo.core.security.VisibilityParseException;
import org.apache.accumulo.core.util.BadArgumentException;
/**
* A constraint that checks the visibility of columns against the actor's authorizations. Violation
* codes:
* <ul>
* <li>1 = failure to parse visibility expression</li>
* <li>2 = insufficient authorization</li>
* </ul>
*
* @since 2.1.0 moved from org.apache.accumulo.core.constraints package
*/
public class VisibilityConstraint implements Constraint {

  @Override
  public String getViolationDescription(short violationCode) {
    switch (violationCode) {
      case 1:
        return "Malformed column visibility";
      case 2:
        return "User does not have authorization on column visibility";
    }
    return null;
  }

  /**
   * Evaluates every non-empty column visibility in the mutation against the actor's
   * authorizations. Returns null when all visibilities are satisfied.
   */
  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    List<ColumnUpdate> updates = mutation.getUpdates();

    // cache of already-verified visibility expressions; only worth keeping
    // when the mutation carries more than one update
    HashSet<String> verified = updates.size() > 1 ? new HashSet<>() : null;

    // created lazily, only if a non-empty visibility is encountered
    VisibilityEvaluator evaluator = null;

    for (ColumnUpdate update : updates) {
      byte[] expression = update.getColumnVisibility();
      if (expression.length == 0) {
        continue; // empty visibility requires no authorization
      }

      String exprString = null;
      if (verified != null) {
        exprString = new String(expression, UTF_8);
        if (verified.contains(exprString)) {
          continue; // already evaluated an identical expression in this mutation
        }
      }

      try {
        if (evaluator == null) {
          evaluator = new VisibilityEvaluator(env.getAuthorizationsContainer());
        }
        if (!evaluator.evaluate(new ColumnVisibility(expression))) {
          return Collections.singletonList((short) 2);
        }
      } catch (BadArgumentException | VisibilityParseException e) {
        return Collections.singletonList((short) 1);
      }

      if (verified != null) {
        verified.add(exprString);
      }
    }

    return null;
  }
}
| 9,942 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data.constraints;
import java.util.Collections;
import java.util.List;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
/**
* This constraint ensures mutations do not have deletes.
*
* @since 2.1.0 moved from org.apache.accumulo.core.constraints package
*/
public class NoDeleteConstraint implements Constraint {

  @Override
  public String getViolationDescription(short violationCode) {
    return violationCode == 1 ? "Deletes are not allowed" : null;
  }

  /**
   * Rejects the mutation with violation code 1 if any of its column updates is a delete;
   * otherwise returns null (no violations).
   */
  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    for (ColumnUpdate update : mutation.getUpdates()) {
      if (update.isDeleted()) {
        return Collections.singletonList((short) 1);
      }
    }
    return null;
  }
}
| 9,943 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/data/constraints/Constraint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.data.constraints;
import java.util.List;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.security.AuthorizationContainer;
/**
* Constraint objects are used to determine if mutations will be applied to a table.
*
* <p>
* This interface expects implementers to return violation codes. The reason codes are returned
* instead of arbitrary strings to encourage conciseness. Conciseness is needed because violations
* are aggregated. If a user sends a batch of 10,000 mutations to Accumulo, only aggregated counts
* about which violations occurred are returned. If the constraint implementer were allowed to
* return arbitrary violation strings like the following:
*
* <p>
* Value "abc" is not a number<br>
* Value "vbg" is not a number
*
* <p>
* This would not aggregate very well, because the same violation is represented with two different
* strings.
*
* @since 2.1.0 Replaces interface in org.apache.accumulo.core.constraints package
*/
public interface Constraint {
  /**
   * The environment within which a constraint exists. Provides the constraint with contextual
   * information about the mutation being checked: the destination tablet, the user applying the
   * mutation, and that user's authorizations.
   *
   * @since 2.1.0
   */
  interface Environment {
    /**
     * Gets the tablet Id of the environment.
     *
     * @return the {@link TabletId} of the tablet the mutation is destined for
     */
    TabletId getTablet();
    /**
     * Gets the user within the environment.
     *
     * @return the name of the user applying the mutation
     */
    String getUser();
    /**
     * Gets the authorizations in the environment.
     *
     * @return the authorizations held by the user, as an {@link AuthorizationContainer}
     */
    AuthorizationContainer getAuthorizationsContainer();
  }
  /**
   * Gets a short, one-sentence description of what a given violation code means.
   *
   * @param violationCode numeric violation code previously returned by
   *        {@link #check(Environment, Mutation)}
   * @return matching violation description, or {@code null} if the code is not recognized
   */
  String getViolationDescription(short violationCode);
  /**
   * Checks a mutation for constraint violations. If the mutation contains no violations, returns
   * {@code null}. Otherwise, returns a list of violation codes.
   *
   * Violation codes must be non-negative. Negative violation codes are reserved for system use.
   *
   * @param env constraint environment for the mutation being checked
   * @param mutation mutation to check
   * @return list of violation codes, or {@code null} if none
   */
  List<Short> check(Environment env, Mutation mutation);
}
| 9,944 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/Property.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.lang.annotation.Annotation;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.function.Predicate;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.data.constraints.NoDeleteConstraint;
import org.apache.accumulo.core.file.rfile.RFile;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iteratorsImpl.system.DeletingIterator;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner;
import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
import org.apache.accumulo.core.spi.fs.RandomVolumeChooser;
import org.apache.accumulo.core.spi.scan.ScanDispatcher;
import org.apache.accumulo.core.spi.scan.ScanPrioritizer;
import org.apache.accumulo.core.spi.scan.ScanServerSelector;
import org.apache.accumulo.core.spi.scan.SimpleScanDispatcher;
import org.apache.accumulo.core.util.format.DefaultFormatter;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
public enum Property {
// SSL properties local to each node (see also instance.ssl.enabled which must be consistent
// across all nodes in an instance)
RPC_PREFIX("rpc.", null, PropertyType.PREFIX,
"Properties in this category related to the configuration of SSL keys for"
+ " RPC. See also `instance.ssl.enabled`.",
"1.6.0"),
RPC_BACKLOG("rpc.backlog", "50", PropertyType.COUNT,
"Configures the TCP backlog for the server side sockets created by Thrift."
+ " This property is not used for SSL type server sockets. A value of zero"
+ " will use the Thrift default value.",
"2.1.3"),
RPC_SSL_KEYSTORE_PATH("rpc.javax.net.ssl.keyStore", "", PropertyType.PATH,
"Path of the keystore file for the server's private SSL key.", "1.6.0"),
@Sensitive
RPC_SSL_KEYSTORE_PASSWORD("rpc.javax.net.ssl.keyStorePassword", "", PropertyType.STRING,
"Password used to encrypt the SSL private keystore. "
+ "Leave blank to use the Accumulo instance secret.",
"1.6.0"),
RPC_SSL_KEYSTORE_TYPE("rpc.javax.net.ssl.keyStoreType", "jks", PropertyType.STRING,
"Type of SSL keystore.", "1.6.0"),
RPC_SSL_TRUSTSTORE_PATH("rpc.javax.net.ssl.trustStore", "", PropertyType.PATH,
"Path of the truststore file for the root cert.", "1.6.0"),
@Sensitive
RPC_SSL_TRUSTSTORE_PASSWORD("rpc.javax.net.ssl.trustStorePassword", "", PropertyType.STRING,
"Password used to encrypt the SSL truststore. Leave blank to use no password.", "1.6.0"),
RPC_SSL_TRUSTSTORE_TYPE("rpc.javax.net.ssl.trustStoreType", "jks", PropertyType.STRING,
"Type of SSL truststore.", "1.6.0"),
RPC_USE_JSSE("rpc.useJsse", "false", PropertyType.BOOLEAN,
"Use JSSE system properties to configure SSL rather than the " + RPC_PREFIX.getKey()
+ "javax.net.ssl.* Accumulo properties.",
"1.6.0"),
RPC_SSL_CIPHER_SUITES("rpc.ssl.cipher.suites", "", PropertyType.STRING,
"Comma separated list of cipher suites that can be used by accepted connections.", "1.6.1"),
RPC_SSL_ENABLED_PROTOCOLS("rpc.ssl.server.enabled.protocols", "TLSv1.3", PropertyType.STRING,
"Comma separated list of protocols that can be used to accept connections.", "1.6.2"),
RPC_SSL_CLIENT_PROTOCOL("rpc.ssl.client.protocol", "TLSv1.3", PropertyType.STRING,
"The protocol used to connect to a secure server. Must be in the list of enabled protocols "
+ "on the server side `rpc.ssl.server.enabled.protocols`.",
"1.6.2"),
RPC_SASL_QOP("rpc.sasl.qop", "auth", PropertyType.STRING,
"The quality of protection to be used with SASL. Valid values are 'auth', 'auth-int',"
+ " and 'auth-conf'.",
"1.7.0"),
// instance properties (must be the same for every node in an instance)
INSTANCE_PREFIX("instance.", null, PropertyType.PREFIX,
"Properties in this category must be consistent throughout a cloud. "
+ "This is enforced and servers won't be able to communicate if these differ.",
"1.3.5"),
INSTANCE_ZK_HOST("instance.zookeeper.host", "localhost:2181", PropertyType.HOSTLIST,
"Comma separated list of zookeeper servers.", "1.3.5"),
INSTANCE_ZK_TIMEOUT("instance.zookeeper.timeout", "30s", PropertyType.TIMEDURATION,
"Zookeeper session timeout; "
+ "max value when represented as milliseconds should be no larger than "
+ Integer.MAX_VALUE + ".",
"1.3.5"),
@Sensitive
INSTANCE_SECRET("instance.secret", "DEFAULT", PropertyType.STRING,
"A secret unique to a given instance that all servers must know in order"
+ " to communicate with one another. It should be changed prior to the"
+ " initialization of Accumulo. To change it after Accumulo has been"
+ " initialized, use the ChangeSecret tool and then update accumulo.properties"
+ " everywhere. Before using the ChangeSecret tool, make sure Accumulo is not"
+ " running and you are logged in as the user that controls Accumulo files in"
+ " HDFS. To use the ChangeSecret tool, run the command: `./bin/accumulo"
+ " org.apache.accumulo.server.util.ChangeSecret`.",
"1.3.5"),
INSTANCE_VOLUMES("instance.volumes", "", PropertyType.STRING,
"A comma separated list of dfs uris to use. Files will be stored across"
+ " these filesystems. In some situations, the first volume in this list"
+ " may be treated differently, such as being preferred for writing out"
+ " temporary files (for example, when creating a pre-split table)."
+ " After adding uris to this list, run 'accumulo init --add-volume' and then"
+ " restart tservers. If entries are removed from this list then tservers"
+ " will need to be restarted. After a uri is removed from the list Accumulo"
+ " will not create new files in that location, however Accumulo can still"
+ " reference files created at that location before the config change. To use"
+ " a comma or other reserved characters in a URI use standard URI hex"
+ " encoding. For example replace commas with %2C.",
"1.6.0"),
INSTANCE_VOLUME_CONFIG_PREFIX("instance.volume.config.", null, PropertyType.PREFIX,
"Properties in this category are used to provide volume specific overrides to "
+ "the general filesystem client configuration. Properties using this prefix "
+ "should be in the form "
+ "'instance.volume.config.<volume-uri>.<property-name>=<property-value>. An "
+ "example: "
+ "'instance.volume.config.hdfs://namespace-a:8020/accumulo.dfs.client.hedged.read.threadpool.size=10'. "
+ "Note that when specifying property names that contain colons in the properties "
+ "files that the colons need to be escaped with a backslash.",
"2.1.1"),
INSTANCE_VOLUMES_REPLACEMENTS("instance.volumes.replacements", "", PropertyType.STRING,
"Since accumulo stores absolute URIs changing the location of a namenode "
+ "could prevent Accumulo from starting. The property helps deal with "
+ "that situation. Provide a comma separated list of uri replacement "
+ "pairs here if a namenode location changes. Each pair should be separated "
+ "with a space. For example, if hdfs://nn1 was replaced with "
+ "hdfs://nnA and hdfs://nn2 was replaced with hdfs://nnB, then set this "
+ "property to 'hdfs://nn1 hdfs://nnA,hdfs://nn2 hdfs://nnB' "
+ "Replacements must be configured for use. To see which volumes are "
+ "currently in use, run 'accumulo admin volumes -l'. To use a comma or "
+ "other reserved characters in a URI use standard URI hex encoding. For "
+ "example replace commas with %2C.",
"1.6.0"),
INSTANCE_VOLUMES_UPGRADE_RELATIVE("instance.volumes.upgrade.relative", "", PropertyType.STRING,
"The volume dfs uri containing relative tablet file paths. Relative paths may exist in the metadata from "
+ "versions prior to 1.6. This property is only required if a relative path is detected "
+ "during the upgrade process and will only be used once.",
"2.1.0"),
@Experimental // interface uses unstable internal types, use with caution
INSTANCE_SECURITY_AUTHENTICATOR("instance.security.authenticator",
"org.apache.accumulo.server.security.handler.ZKAuthenticator", PropertyType.CLASSNAME,
"The authenticator class that accumulo will use to determine if a user "
+ "has privilege to perform an action.",
"1.5.0"),
@Experimental // interface uses unstable internal types, use with caution
INSTANCE_SECURITY_AUTHORIZOR("instance.security.authorizor",
"org.apache.accumulo.server.security.handler.ZKAuthorizor", PropertyType.CLASSNAME,
"The authorizor class that accumulo will use to determine what labels a "
+ "user has privilege to see.",
"1.5.0"),
@Experimental // interface uses unstable internal types, use with caution
INSTANCE_SECURITY_PERMISSION_HANDLER("instance.security.permissionHandler",
"org.apache.accumulo.server.security.handler.ZKPermHandler", PropertyType.CLASSNAME,
"The permission handler class that accumulo will use to determine if a "
+ "user has privilege to perform an action.",
"1.5.0"),
INSTANCE_RPC_SSL_ENABLED("instance.rpc.ssl.enabled", "false", PropertyType.BOOLEAN,
"Use SSL for socket connections from clients and among accumulo services. "
+ "Mutually exclusive with SASL RPC configuration.",
"1.6.0"),
INSTANCE_RPC_SSL_CLIENT_AUTH("instance.rpc.ssl.clientAuth", "false", PropertyType.BOOLEAN,
"Require clients to present certs signed by a trusted root.", "1.6.0"),
INSTANCE_RPC_SASL_ENABLED("instance.rpc.sasl.enabled", "false", PropertyType.BOOLEAN,
"Configures Thrift RPCs to require SASL with GSSAPI which supports "
+ "Kerberos authentication. Mutually exclusive with SSL RPC configuration.",
"1.7.0"),
INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION("instance.rpc.sasl.allowed.user.impersonation", "",
PropertyType.STRING,
"One-line configuration property controlling what users are allowed to "
+ "impersonate other users.",
"1.7.1"),
INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION("instance.rpc.sasl.allowed.host.impersonation", "",
PropertyType.STRING,
"One-line configuration property controlling the network locations "
+ "(hostnames) that are allowed to impersonate other users.",
"1.7.1"),
// Crypto-related properties
@Experimental
INSTANCE_CRYPTO_PREFIX("instance.crypto.opts.", null, PropertyType.PREFIX,
"Properties related to on-disk file encryption.", "2.0.0"),
@Experimental
@Sensitive
INSTANCE_CRYPTO_SENSITIVE_PREFIX("instance.crypto.opts.sensitive.", null, PropertyType.PREFIX,
"Sensitive properties related to on-disk file encryption.", "2.0.0"),
@Experimental
INSTANCE_CRYPTO_FACTORY("instance.crypto.opts.factory",
"org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory", PropertyType.CLASSNAME,
"The class which provides crypto services for on-disk file encryption. The default does nothing. To enable "
+ "encryption, replace this classname with an implementation of the"
+ "org.apache.accumulo.core.spi.crypto.CryptoFactory interface.",
"2.1.0"),
// general properties
GENERAL_PREFIX("general.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of accumulo overall, but"
+ " do not have to be consistent throughout a cloud.",
"1.3.5"),
GENERAL_CONTEXT_CLASSLOADER_FACTORY("general.context.class.loader.factory", "",
PropertyType.CLASSNAME,
"Name of classloader factory to be used to create classloaders for named contexts,"
+ " such as per-table contexts set by `table.class.loader.context`.",
"2.1.0"),
GENERAL_FILE_NAME_ALLOCATION_BATCH_SIZE_MIN("general.file.name.allocation.batch.size.min", "100",
PropertyType.COUNT,
"The minimum number of filenames that will be allocated from ZooKeeper at a time.", "2.1.3"),
GENERAL_FILE_NAME_ALLOCATION_BATCH_SIZE_MAX("general.file.name.allocation.batch.size.max", "200",
PropertyType.COUNT,
"The maximum number of filenames that will be allocated from ZooKeeper at a time.", "2.1.3"),
GENERAL_RPC_TIMEOUT("general.rpc.timeout", "120s", PropertyType.TIMEDURATION,
"Time to wait on I/O for simple, short RPC calls.", "1.3.5"),
@Experimental
GENERAL_RPC_SERVER_TYPE("general.rpc.server.type", "", PropertyType.STRING,
"Type of Thrift server to instantiate, see "
+ "org.apache.accumulo.server.rpc.ThriftServerType for more information. "
+ "Only useful for benchmarking thrift servers.",
"1.7.0"),
GENERAL_KERBEROS_KEYTAB("general.kerberos.keytab", "", PropertyType.PATH,
"Path to the kerberos keytab to use. Leave blank if not using kerberoized hdfs.", "1.4.1"),
GENERAL_KERBEROS_PRINCIPAL("general.kerberos.principal", "", PropertyType.STRING,
"Name of the kerberos principal to use. _HOST will automatically be "
+ "replaced by the machines hostname in the hostname portion of the "
+ "principal. Leave blank if not using kerberoized hdfs.",
"1.4.1"),
GENERAL_KERBEROS_RENEWAL_PERIOD("general.kerberos.renewal.period", "30s",
PropertyType.TIMEDURATION,
"The amount of time between attempts to perform Kerberos ticket renewals."
+ " This does not equate to how often tickets are actually renewed (which is"
+ " performed at 80% of the ticket lifetime).",
"1.6.5"),
GENERAL_MAX_MESSAGE_SIZE("general.server.message.size.max", "1G", PropertyType.BYTES,
"The maximum size of a message that can be sent to a server.", "1.5.0"),
@Experimental
GENERAL_OPENTELEMETRY_ENABLED("general.opentelemetry.enabled", "false", PropertyType.BOOLEAN,
"Enables tracing functionality using OpenTelemetry (assuming OpenTelemetry is configured).",
"2.1.0"),
GENERAL_THREADPOOL_SIZE("general.server.threadpool.size", "1", PropertyType.COUNT,
"The number of threads to use for server-internal scheduled tasks.", "2.1.0"),
// If you update the default type, be sure to update the default used for initialization failures
// in VolumeManagerImpl
@Experimental
GENERAL_VOLUME_CHOOSER("general.volume.chooser", RandomVolumeChooser.class.getName(),
PropertyType.CLASSNAME,
"The class that will be used to select which volume will be used to create new files.",
"1.6.0"),
GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS("general.security.credential.provider.paths", "",
PropertyType.STRING, "Comma-separated list of paths to CredentialProviders.", "1.6.1"),
GENERAL_ARBITRARY_PROP_PREFIX("general.custom.", null, PropertyType.PREFIX,
"Prefix to be used for user defined system-wide properties. This may be"
+ " particularly useful for system-wide configuration for various"
+ " user-implementations of pluggable Accumulo features, such as the balancer"
+ " or volume chooser.",
"2.0.0"),
GENERAL_DELEGATION_TOKEN_LIFETIME("general.delegation.token.lifetime", "7d",
PropertyType.TIMEDURATION,
"The length of time that delegation tokens and secret keys are valid.", "1.7.0"),
GENERAL_DELEGATION_TOKEN_UPDATE_INTERVAL("general.delegation.token.update.interval", "1d",
PropertyType.TIMEDURATION, "The length of time between generation of new secret keys.",
"1.7.0"),
GENERAL_LOW_MEM_DETECTOR_INTERVAL("general.low.mem.detector.interval", "5s",
PropertyType.TIMEDURATION, "The time interval between low memory checks.", "3.0.0"),
GENERAL_LOW_MEM_DETECTOR_THRESHOLD("general.low.mem.detector.threshold", "0.05",
PropertyType.FRACTION,
"The LowMemoryDetector will report when free memory drops below this percentage of total memory.",
"3.0.0"),
GENERAL_LOW_MEM_SCAN_PROTECTION("general.low.mem.protection.scan", "false", PropertyType.BOOLEAN,
"Scans may be paused or return results early when the server "
+ "is low on memory and this property is set to true. Enabling this property will incur a slight "
+ "scan performance penalty when the server is not low on memory.",
"3.0.0"),
GENERAL_LOW_MEM_MINC_PROTECTION("general.low.mem.protection.compaction.minc", "false",
PropertyType.BOOLEAN,
"Minor compactions may be paused when the server "
+ "is low on memory and this property is set to true. Enabling this property will incur a slight "
+ "compaction performance penalty when the server is not low on memory.",
"3.0.0"),
GENERAL_LOW_MEM_MAJC_PROTECTION("general.low.mem.protection.compaction.majc", "false",
PropertyType.BOOLEAN,
"Major compactions may be paused when the server "
+ "is low on memory and this property is set to true. Enabling this property will incur a slight "
+ "compaction performance penalty when the server is not low on memory.",
"3.0.0"),
GENERAL_MAX_SCANNER_RETRY_PERIOD("general.max.scanner.retry.period", "5s",
PropertyType.TIMEDURATION,
"The maximum amount of time that a Scanner should wait before retrying a failed RPC.",
"1.7.3"),
GENERAL_MICROMETER_ENABLED("general.micrometer.enabled", "false", PropertyType.BOOLEAN,
"Enables metrics functionality using Micrometer.", "2.1.0"),
GENERAL_MICROMETER_JVM_METRICS_ENABLED("general.micrometer.jvm.metrics.enabled", "false",
PropertyType.BOOLEAN, "Enables JVM metrics functionality using Micrometer.", "2.1.0"),
GENERAL_MICROMETER_FACTORY("general.micrometer.factory", "", PropertyType.CLASSNAME,
"Name of class that implements MeterRegistryFactory.", "2.1.0"),
GENERAL_PROCESS_BIND_ADDRESS("general.process.bind.addr", "0.0.0.0", PropertyType.STRING,
"The local IP address to which this server should bind for sending and receiving network traffic.",
"3.0.0"),
// properties that are specific to manager server behavior
MANAGER_PREFIX("manager.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the manager server.", "2.1.0"),
MANAGER_CLIENTPORT("manager.port.client", "9999", PropertyType.PORT,
"The port used for handling client connections on the manager.", "1.3.5"),
MANAGER_TABLET_BALANCER("manager.tablet.balancer",
"org.apache.accumulo.core.spi.balancer.TableLoadBalancer", PropertyType.CLASSNAME,
"The balancer class that accumulo will use to make tablet assignment and "
+ "migration decisions.",
"1.3.5"),
MANAGER_TABLET_GROUP_WATCHER_INTERVAL("manager.tablet.watcher.interval", "60s",
PropertyType.TIMEDURATION,
"Time to wait between scanning tablet states to identify tablets that need to be assigned, un-assigned, migrated, etc.",
"2.1.2"),
MANAGER_BULK_TIMEOUT("manager.bulk.timeout", "5m", PropertyType.TIMEDURATION,
"The time to wait for a tablet server to process a bulk import request.", "1.4.3"),
MANAGER_RENAME_THREADS("manager.rename.threadpool.size", "20", PropertyType.COUNT,
"The number of threads to use when renaming user files during table import or bulk ingest.",
"2.1.0"),
MANAGER_MINTHREADS("manager.server.threads.minimum", "20", PropertyType.COUNT,
"The minimum number of threads to use to handle incoming requests.", "1.4.0"),
MANAGER_MINTHREADS_TIMEOUT("manager.server.threads.timeout", "0s", PropertyType.TIMEDURATION,
"The time after which incoming request threads terminate with no work available. Zero (0) will keep the threads alive indefinitely.",
"2.1.0"),
MANAGER_THREADCHECK("manager.server.threadcheck.time", "1s", PropertyType.TIMEDURATION,
"The time between adjustments of the server thread pool.", "1.4.0"),
MANAGER_RECOVERY_DELAY("manager.recovery.delay", "10s", PropertyType.TIMEDURATION,
"When a tablet server's lock is deleted, it takes time for it to "
+ "completely quit. This delay gives it time before log recoveries begin.",
"1.5.0"),
MANAGER_RECOVERY_WAL_EXISTENCE_CACHE_TIME("manager.recovery.wal.cache.time", "15s",
PropertyType.TIMEDURATION,
"Amount of time that the existence of recovery write-ahead logs is cached.", "2.1.2"),
MANAGER_LEASE_RECOVERY_WAITING_PERIOD("manager.lease.recovery.interval", "5s",
PropertyType.TIMEDURATION,
"The amount of time to wait after requesting a write-ahead log to be recovered.", "1.5.0"),
MANAGER_WAL_CLOSER_IMPLEMENTATION("manager.wal.closer.implementation",
"org.apache.accumulo.server.manager.recovery.HadoopLogCloser", PropertyType.CLASSNAME,
"A class that implements a mechanism to steal write access to a write-ahead log.", "2.1.0"),
MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL("manager.fate.metrics.min.update.interval", "60s",
PropertyType.TIMEDURATION, "Limit calls from metric sinks to zookeeper to update interval.",
"1.9.3"),
MANAGER_FATE_THREADPOOL_SIZE("manager.fate.threadpool.size", "4", PropertyType.COUNT,
"The number of threads used to run fault-tolerant executions (FATE)."
+ " These are primarily table operations like merge.",
"1.4.3"),
MANAGER_STATUS_THREAD_POOL_SIZE("manager.status.threadpool.size", "0", PropertyType.COUNT,
"The number of threads to use when fetching the tablet server status for balancing. Zero "
+ "indicates an unlimited number of threads will be used.",
"1.8.0"),
MANAGER_METADATA_SUSPENDABLE("manager.metadata.suspendable", "false", PropertyType.BOOLEAN,
"Allow tablets for the " + MetadataTable.NAME
+ " table to be suspended via table.suspend.duration.",
"1.8.0"),
MANAGER_STARTUP_TSERVER_AVAIL_MIN_COUNT("manager.startup.tserver.avail.min.count", "0",
PropertyType.COUNT,
"Minimum number of tservers that need to be registered before manager will "
+ "start tablet assignment - checked at manager initialization, when manager gets lock. "
+ " When set to 0 or less, no blocking occurs. Default is 0 (disabled) to keep original "
+ " behaviour.",
"1.10.0"),
MANAGER_STARTUP_TSERVER_AVAIL_MAX_WAIT("manager.startup.tserver.avail.max.wait", "0",
PropertyType.TIMEDURATION,
"Maximum time manager will wait for tserver available threshold "
+ "to be reached before continuing. When set to 0 or less, will block "
+ "indefinitely. Default is 0 to block indefinitely. Only valid when tserver available "
+ "threshold is set greater than 0.",
"1.10.0"),
// properties that are specific to scan server behavior
@Experimental
SSERV_PREFIX("sserver.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the scan servers.", "2.1.0"),
@Experimental
SSERV_DATACACHE_SIZE("sserver.cache.data.size", "10%", PropertyType.MEMORY,
"Specifies the size of the cache for RFile data blocks on each scan server.", "2.1.0"),
@Experimental
SSERV_INDEXCACHE_SIZE("sserver.cache.index.size", "25%", PropertyType.MEMORY,
"Specifies the size of the cache for RFile index blocks on each scan server.", "2.1.0"),
@Experimental
SSERV_SUMMARYCACHE_SIZE("sserver.cache.summary.size", "10%", PropertyType.MEMORY,
"Specifies the size of the cache for summary data on each scan server.", "2.1.0"),
@Experimental
SSERV_DEFAULT_BLOCKSIZE("sserver.default.blocksize", "1M", PropertyType.BYTES,
"Specifies a default blocksize for the scan server caches.", "2.1.0"),
@Experimental
SSERV_GROUP_NAME("sserver.group", ScanServerSelector.DEFAULT_SCAN_SERVER_GROUP_NAME,
PropertyType.STRING,
"Optional group name that will be made available to the "
+ "ScanServerSelector client plugin. Groups support at least two use cases:"
+ " dedicating resources to scans and/or using different hardware for scans.",
"3.0.0"),
@Experimental
SSERV_CACHED_TABLET_METADATA_EXPIRATION("sserver.cache.metadata.expiration", "5m",
PropertyType.TIMEDURATION, "The time after which cached tablet metadata will be refreshed.",
"2.1.0"),
@Experimental
SSERV_PORTSEARCH("sserver.port.search", "true", PropertyType.BOOLEAN,
"if the ports above are in use, search higher ports until one is available.", "2.1.0"),
@Experimental
SSERV_CLIENTPORT("sserver.port.client", "9996", PropertyType.PORT,
"The port used for handling client connections on the tablet servers.", "2.1.0"),
@Experimental
SSERV_MAX_MESSAGE_SIZE("sserver.server.message.size.max", "1G", PropertyType.BYTES,
"The maximum size of a message that can be sent to a scan server.", "2.1.0"),
@Experimental
SSERV_MINTHREADS("sserver.server.threads.minimum", "2", PropertyType.COUNT,
"The minimum number of threads to use to handle incoming requests.", "2.1.0"),
@Experimental
SSERV_MINTHREADS_TIMEOUT("sserver.server.threads.timeout", "0s", PropertyType.TIMEDURATION,
"The time after which incoming request threads terminate with no work available. Zero (0) will keep the threads alive indefinitely.",
"2.1.0"),
@Experimental
SSERV_SCAN_EXECUTORS_PREFIX("sserver.scan.executors.", null, PropertyType.PREFIX,
"Prefix for defining executors to service scans. See "
+ "[scan executors]({% durl administration/scan-executors %}) for an overview of why and"
+ " how to use this property. For each executor the number of threads, thread priority, "
+ "and an optional prioritizer can be configured. To configure a new executor, set "
+ "`sserver.scan.executors.<name>.threads=<number>`. Optionally, can also set "
+ "`sserver.scan.executors.<name>.priority=<number 1 to 10>`, "
+ "`sserver.scan.executors.<name>.prioritizer=<class name>`, and "
+ "`sserver.scan.executors.<name>.prioritizer.opts.<key>=<value>`.",
"2.1.0"),
@Experimental
SSERV_SCAN_EXECUTORS_DEFAULT_THREADS("sserver.scan.executors.default.threads", "16",
PropertyType.COUNT, "The number of threads for the scan executor that tables use by default.",
"2.1.0"),
SSERV_SCAN_EXECUTORS_DEFAULT_PRIORITIZER("sserver.scan.executors.default.prioritizer", "",
PropertyType.STRING,
"Prioritizer for the default scan executor. Defaults to none which "
+ "results in FIFO priority. Set to a class that implements "
+ ScanPrioritizer.class.getName() + " to configure one.",
"2.1.0"),
@Experimental
SSERV_SCAN_EXECUTORS_META_THREADS("sserver.scan.executors.meta.threads", "8", PropertyType.COUNT,
"The number of threads for the metadata table scan executor.", "2.1.0"),
@Experimental
SSERVER_SCAN_REFERENCE_EXPIRATION_TIME("sserver.scan.reference.expiration", "5m",
PropertyType.TIMEDURATION,
"The amount of time a scan reference is unused before its deleted from metadata table.",
"2.1.0"),
@Experimental
SSERV_THREADCHECK("sserver.server.threadcheck.time", "1s", PropertyType.TIMEDURATION,
"The time between adjustments of the thrift server thread pool.", "2.1.0"),
// properties that are specific to tablet server behavior
TSERV_PREFIX("tserver.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the tablet servers.", "1.3.5"),
TSERV_CLIENT_TIMEOUT("tserver.client.timeout", "3s", PropertyType.TIMEDURATION,
"Time to wait for clients to continue scans before closing a session.", "1.3.5"),
TSERV_DEFAULT_BLOCKSIZE("tserver.default.blocksize", "1M", PropertyType.BYTES,
"Specifies a default blocksize for the tserver caches.", "1.3.5"),
TSERV_CACHE_MANAGER_IMPL("tserver.cache.manager.class",
"org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheManager", PropertyType.STRING,
"Specifies the class name of the block cache factory implementation."
+ " Alternative implementation is"
+ " org.apache.accumulo.core.file.blockfile.cache.tinylfu.TinyLfuBlockCacheManager.",
"2.0.0"),
TSERV_DATACACHE_SIZE("tserver.cache.data.size", "10%", PropertyType.MEMORY,
"Specifies the size of the cache for RFile data blocks.", "1.3.5"),
TSERV_INDEXCACHE_SIZE("tserver.cache.index.size", "25%", PropertyType.MEMORY,
"Specifies the size of the cache for RFile index blocks.", "1.3.5"),
TSERV_SUMMARYCACHE_SIZE("tserver.cache.summary.size", "10%", PropertyType.MEMORY,
"Specifies the size of the cache for summary data on each tablet server.", "2.0.0"),
TSERV_PORTSEARCH("tserver.port.search", "false", PropertyType.BOOLEAN,
"if the ports above are in use, search higher ports until one is available.", "1.3.5"),
TSERV_CLIENTPORT("tserver.port.client", "9997", PropertyType.PORT,
"The port used for handling client connections on the tablet servers.", "1.3.5"),
TSERV_TOTAL_MUTATION_QUEUE_MAX("tserver.total.mutation.queue.max", "5%", PropertyType.MEMORY,
"The amount of memory used to store write-ahead-log mutations before flushing them.",
"1.7.0"),
TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN("tserver.tablet.split.midpoint.files.max", "300",
PropertyType.COUNT,
"To find a tablets split points, all RFiles are opened and their indexes"
+ " are read. This setting determines how many RFiles can be opened at once."
+ " When there are more RFiles than this setting multiple passes must be"
+ " made, which is slower. However opening too many RFiles at once can cause"
+ " problems.",
"1.3.5"),
TSERV_WAL_MAX_REFERENCED("tserver.wal.max.referenced", "3", PropertyType.COUNT,
"When a tablet server has more than this many write ahead logs, any tablet referencing older "
+ "logs over this threshold is minor compacted. Also any tablet referencing this many "
+ "logs or more will be compacted.",
"2.1.0"),
TSERV_WAL_MAX_SIZE("tserver.wal.max.size", "1G", PropertyType.BYTES,
"The maximum size for each write-ahead log. See comment for property"
+ " `tserver.memory.maps.max`.",
"2.1.0"),
TSERV_WAL_MAX_AGE("tserver.wal.max.age", "24h", PropertyType.TIMEDURATION,
"The maximum age for each write-ahead log.", "2.1.0"),
TSERV_WAL_TOLERATED_CREATION_FAILURES("tserver.wal.tolerated.creation.failures", "50",
PropertyType.COUNT,
"The maximum number of failures tolerated when creating a new write-ahead"
+ " log. Negative values will allow unlimited creation failures. Exceeding this"
+ " number of failures consecutively trying to create a new write-ahead log"
+ " causes the TabletServer to exit.",
"2.1.0"),
TSERV_WAL_TOLERATED_WAIT_INCREMENT("tserver.wal.tolerated.wait.increment", "1000ms",
PropertyType.TIMEDURATION,
"The amount of time to wait between failures to create or write a write-ahead log.", "2.1.0"),
// Never wait longer than 5 mins for a retry
TSERV_WAL_TOLERATED_MAXIMUM_WAIT_DURATION("tserver.wal.maximum.wait.duration", "5m",
PropertyType.TIMEDURATION,
"The maximum amount of time to wait after a failure to create or write a write-ahead log.",
"2.1.0"),
TSERV_SCAN_MAX_OPENFILES("tserver.scan.files.open.max", "100", PropertyType.COUNT,
"Maximum total RFiles that all tablets in a tablet server can open for scans.", "1.4.0"),
TSERV_MAX_IDLE("tserver.files.open.idle", "1m", PropertyType.TIMEDURATION,
"Tablet servers leave previously used RFiles open for future queries."
+ " This setting determines how much time an unused RFile should be kept open"
+ " until it is closed.",
"1.3.5"),
TSERV_NATIVEMAP_ENABLED("tserver.memory.maps.native.enabled", "true", PropertyType.BOOLEAN,
"An in-memory data store for accumulo implemented in c++ that increases"
+ " the amount of data accumulo can hold in memory and avoids Java GC pauses.",
"1.3.5"),
TSERV_MAXMEM("tserver.memory.maps.max", "33%", PropertyType.MEMORY,
"Maximum amount of memory that can be used to buffer data written to a"
+ " tablet server. There are two other properties that can effectively limit"
+ " memory usage `table.compaction.minor.logs.threshold` and"
+ " `tserver.wal.max.size`. Ensure that `table.compaction.minor.logs.threshold`"
+ " * `tserver.wal.max.size` >= this property.",
"1.3.5"),
TSERV_SESSION_MAXIDLE("tserver.session.idle.max", "1m", PropertyType.TIMEDURATION,
"When a tablet server's SimpleTimer thread triggers to check idle"
+ " sessions, this configurable option will be used to evaluate scan sessions"
+ " to determine if they can be closed due to inactivity.",
"1.3.5"),
TSERV_UPDATE_SESSION_MAXIDLE("tserver.session.update.idle.max", "1m", PropertyType.TIMEDURATION,
"When a tablet server's SimpleTimer thread triggers to check idle"
+ " sessions, this configurable option will be used to evaluate update"
+ " sessions to determine if they can be closed due to inactivity.",
"1.6.5"),
TSERV_SCAN_EXECUTORS_PREFIX("tserver.scan.executors.", null, PropertyType.PREFIX,
"Prefix for defining executors to service scans. See "
+ "[scan executors]({% durl administration/scan-executors %}) for an overview of why and"
+ " how to use this property. For each executor the number of threads, thread priority, "
+ "and an optional prioritizer can be configured. To configure a new executor, set "
+ "`tserver.scan.executors.<name>.threads=<number>`. Optionally, can also set "
+ "`tserver.scan.executors.<name>.priority=<number 1 to 10>`, "
+ "`tserver.scan.executors.<name>.prioritizer=<class name>`, and "
+ "`tserver.scan.executors.<name>.prioritizer.opts.<key>=<value>`.",
"2.0.0"),
TSERV_SCAN_EXECUTORS_DEFAULT_THREADS("tserver.scan.executors.default.threads", "16",
PropertyType.COUNT, "The number of threads for the scan executor that tables use by default.",
"2.0.0"),
TSERV_SCAN_EXECUTORS_DEFAULT_PRIORITIZER("tserver.scan.executors.default.prioritizer", "",
PropertyType.STRING,
"Prioritizer for the default scan executor. Defaults to none which "
+ "results in FIFO priority. Set to a class that implements "
+ ScanPrioritizer.class.getName() + " to configure one.",
"2.0.0"),
TSERV_SCAN_EXECUTORS_META_THREADS("tserver.scan.executors.meta.threads", "8", PropertyType.COUNT,
"The number of threads for the metadata table scan executor.", "2.0.0"),
TSERV_SCAN_RESULTS_MAX_TIMEOUT("tserver.scan.results.max.timeout", "1s",
PropertyType.TIMEDURATION,
"Max time for the thrift client handler to wait for scan results before timing out.",
"2.1.0"),
TSERV_MIGRATE_MAXCONCURRENT("tserver.migrations.concurrent.max", "1", PropertyType.COUNT,
"The maximum number of concurrent tablet migrations for a tablet server.", "1.3.5"),
TSERV_MAJC_DELAY("tserver.compaction.major.delay", "30s", PropertyType.TIMEDURATION,
"Time a tablet server will sleep between checking which tablets need compaction.", "1.3.5"),
TSERV_COMPACTION_SERVICE_PREFIX("tserver.compaction.major.service.", null, PropertyType.PREFIX,
"Prefix for compaction services.", "2.1.0"),
TSERV_COMPACTION_SERVICE_ROOT_PLANNER("tserver.compaction.major.service.root.planner",
DefaultCompactionPlanner.class.getName(), PropertyType.CLASSNAME,
"Compaction planner for root tablet service.", "2.1.0"),
@Deprecated(since = "3.1", forRemoval = true)
TSERV_COMPACTION_SERVICE_ROOT_RATE_LIMIT("tserver.compaction.major.service.root.rate.limit", "0B",
PropertyType.BYTES,
"Maximum number of bytes to read or write per second over all major"
+ " compactions in this compaction service, or 0B for unlimited.",
"2.1.0"),
TSERV_COMPACTION_SERVICE_ROOT_MAX_OPEN(
"tserver.compaction.major.service.root.planner.opts.maxOpen", "30", PropertyType.COUNT,
"The maximum number of files a compaction will open.", "2.1.0"),
// Executor definitions for the root tablet's compaction service. The value is a JSON array, so
// declare it as PropertyType.JSON (matching TSERV_COMPACTION_SERVICE_META_EXECUTORS) so the
// value is validated as JSON rather than accepted as an arbitrary string.
TSERV_COMPACTION_SERVICE_ROOT_EXECUTORS(
    "tserver.compaction.major.service.root.planner.opts.executors",
    "[{'name':'small','type':'internal','maxSize':'32M','numThreads':1},{'name':'huge','type':'internal','numThreads':1}]"
        .replaceAll("'", "\""),
    PropertyType.JSON,
    "See {% jlink -f org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner %}.",
    "2.1.0"),
TSERV_COMPACTION_SERVICE_META_PLANNER("tserver.compaction.major.service.meta.planner",
DefaultCompactionPlanner.class.getName(), PropertyType.CLASSNAME,
"Compaction planner for metadata table.", "2.1.0"),
@Deprecated(since = "3.1", forRemoval = true)
TSERV_COMPACTION_SERVICE_META_RATE_LIMIT("tserver.compaction.major.service.meta.rate.limit", "0B",
PropertyType.BYTES,
"Maximum number of bytes to read or write per second over all major"
+ " compactions in this compaction service, or 0B for unlimited.",
"2.1.0"),
TSERV_COMPACTION_SERVICE_META_MAX_OPEN(
"tserver.compaction.major.service.meta.planner.opts.maxOpen", "30", PropertyType.COUNT,
"The maximum number of files a compaction will open.", "2.1.0"),
TSERV_COMPACTION_SERVICE_META_EXECUTORS(
"tserver.compaction.major.service.meta.planner.opts.executors",
"[{'name':'small','type':'internal','maxSize':'32M','numThreads':2},{'name':'huge','type':'internal','numThreads':2}]"
.replaceAll("'", "\""),
PropertyType.JSON,
"See {% jlink -f org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner %}.",
"2.1.0"),
TSERV_COMPACTION_SERVICE_DEFAULT_PLANNER("tserver.compaction.major.service.default.planner",
DefaultCompactionPlanner.class.getName(), PropertyType.CLASSNAME,
"Planner for default compaction service.", "2.1.0"),
@Deprecated(since = "3.1", forRemoval = true)
TSERV_COMPACTION_SERVICE_DEFAULT_RATE_LIMIT("tserver.compaction.major.service.default.rate.limit",
"0B", PropertyType.BYTES,
"Maximum number of bytes to read or write per second over all major"
+ " compactions in this compaction service, or 0B for unlimited.",
"2.1.0"),
TSERV_COMPACTION_SERVICE_DEFAULT_MAX_OPEN(
"tserver.compaction.major.service.default.planner.opts.maxOpen", "10", PropertyType.COUNT,
"The maximum number of files a compaction will open.", "2.1.0"),
// Executor definitions for the default compaction service. The value is a JSON array, so
// declare it as PropertyType.JSON (matching TSERV_COMPACTION_SERVICE_META_EXECUTORS) so the
// value is validated as JSON rather than accepted as an arbitrary string.
TSERV_COMPACTION_SERVICE_DEFAULT_EXECUTORS(
    "tserver.compaction.major.service.default.planner.opts.executors",
    "[{'name':'small','type':'internal','maxSize':'32M','numThreads':2},{'name':'medium','type':'internal','maxSize':'128M','numThreads':2},{'name':'large','type':'internal','numThreads':2}]"
        .replaceAll("'", "\""),
    PropertyType.JSON,
    "See {% jlink -f org.apache.accumulo.core.spi.compaction.DefaultCompactionPlanner %}.",
    "2.1.0"),
TSERV_MINC_MAXCONCURRENT("tserver.compaction.minor.concurrent.max", "4", PropertyType.COUNT,
"The maximum number of concurrent minor compactions for a tablet server.", "1.3.5"),
TSERV_COMPACTION_WARN_TIME("tserver.compaction.warn.time", "10m", PropertyType.TIMEDURATION,
"When a compaction has not made progress for this time period, a warning will be logged.",
"1.6.0"),
TSERV_BLOOM_LOAD_MAXCONCURRENT("tserver.bloom.load.concurrent.max", "4", PropertyType.COUNT,
"The number of concurrent threads that will load bloom filters in the background. "
+ "Setting this to zero will make bloom filters load in the foreground.",
"1.3.5"),
TSERV_MEMDUMP_DIR("tserver.dir.memdump", "/tmp", PropertyType.PATH,
"A long running scan could possibly hold memory that has been minor"
+ " compacted. To prevent this, the in memory map is dumped to a local file"
+ " and the scan is switched to that local file. We can not switch to the"
+ " minor compacted file because it may have been modified by iterators. The"
+ " file dumped to the local dir is an exact copy of what was in memory.",
"1.3.5"),
TSERV_HEALTH_CHECK_FREQ("tserver.health.check.interval", "30m", PropertyType.TIMEDURATION,
"The time between tablet server health checks.", "2.1.0"),
TSERV_MINTHREADS("tserver.server.threads.minimum", "20", PropertyType.COUNT,
"The minimum number of threads to use to handle incoming requests.", "1.4.0"),
TSERV_MINTHREADS_TIMEOUT("tserver.server.threads.timeout", "0s", PropertyType.TIMEDURATION,
"The time after which incoming request threads terminate with no work available. Zero (0) will keep the threads alive indefinitely.",
"2.1.0"),
TSERV_THREADCHECK("tserver.server.threadcheck.time", "1s", PropertyType.TIMEDURATION,
"The time between adjustments of the server thread pool.", "1.4.0"),
TSERV_MAX_MESSAGE_SIZE("tserver.server.message.size.max", "1G", PropertyType.BYTES,
"The maximum size of a message that can be sent to a tablet server.", "1.6.0"),
TSERV_LOG_BUSY_TABLETS_COUNT("tserver.log.busy.tablets.count", "0", PropertyType.COUNT,
"Number of busiest tablets to log. Logged at interval controlled by "
+ "tserver.log.busy.tablets.interval. If <= 0, logging of busy tablets is disabled.",
"1.10.0"),
TSERV_LOG_BUSY_TABLETS_INTERVAL("tserver.log.busy.tablets.interval", "1h",
PropertyType.TIMEDURATION, "Time interval between logging out busy tablets information.",
"1.10.0"),
TSERV_HOLD_TIME_SUICIDE("tserver.hold.time.max", "5m", PropertyType.TIMEDURATION,
"The maximum time for a tablet server to be in the \"memory full\" state."
+ " If the tablet server cannot write out memory in this much time, it will"
+ " assume there is some failure local to its node, and quit. A value of zero"
+ " is equivalent to forever.",
"1.4.0"),
TSERV_WAL_BLOCKSIZE("tserver.wal.blocksize", "0", PropertyType.BYTES,
"The size of the HDFS blocks used to write to the Write-Ahead log. If"
+ " zero, it will be 110% of `tserver.wal.max.size` (that is, try to use just"
+ " one block).",
"1.5.0"),
TSERV_WAL_REPLICATION("tserver.wal.replication", "0", PropertyType.COUNT,
"The replication to use when writing the Write-Ahead log to HDFS. If"
+ " zero, it will use the HDFS default replication setting.",
"1.5.0"),
TSERV_WAL_SORT_MAX_CONCURRENT("tserver.wal.sort.concurrent.max", "2", PropertyType.COUNT,
"The maximum number of threads to use to sort logs during recovery.", "2.1.0"),
TSERV_WAL_SORT_BUFFER_SIZE("tserver.wal.sort.buffer.size", "10%", PropertyType.MEMORY,
"The amount of memory to use when sorting logs during recovery.", "2.1.0"),
TSERV_WAL_SORT_FILE_PREFIX("tserver.wal.sort.file.", null, PropertyType.PREFIX,
"The rfile properties to use when sorting logs during recovery. Most of the properties"
+ " that begin with 'table.file' can be used here. For example, to set the compression"
+ " of the sorted recovery files to snappy use 'tserver.wal.sort.file.compress.type=snappy'.",
"2.1.0"),
TSERV_WORKQ_THREADS("tserver.workq.threads", "2", PropertyType.COUNT,
"The number of threads for the distributed work queue. These threads are"
+ " used for copying failed bulk import RFiles.",
"1.4.2"),
TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
"Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents"
+ " problems recovering from sudden system resets.",
"1.5.0"),
TSERV_ASSIGNMENT_DURATION_WARNING("tserver.assignment.duration.warning", "10m",
PropertyType.TIMEDURATION,
"The amount of time an assignment can run before the server will print a"
+ " warning along with the current stack trace. Meant to help debug stuck"
+ " assignments.",
"1.6.2"),
TSERV_ASSIGNMENT_MAXCONCURRENT("tserver.assignment.concurrent.max", "2", PropertyType.COUNT,
"The number of threads available to load tablets. Recoveries are still performed serially.",
"1.7.0"),
TSERV_SLOW_FLUSH_MILLIS("tserver.slow.flush.time", "100ms", PropertyType.TIMEDURATION,
"If a flush to the write-ahead log takes longer than this period of time,"
+ " debugging information will written, and may result in a log rollover.",
"1.8.0"),
TSERV_SLOW_FILEPERMIT_MILLIS("tserver.slow.filepermit.time", "100ms", PropertyType.TIMEDURATION,
"If a thread blocks more than this period of time waiting to get file permits,"
+ " debugging information will be written.",
"1.9.3"),
TSERV_SUMMARY_PARTITION_THREADS("tserver.summary.partition.threads", "10", PropertyType.COUNT,
"Summary data must be retrieved from RFiles. For a large number of"
+ " RFiles, the files are broken into partitions of 100k files. This setting"
+ " determines how many of these groups of 100k RFiles will be processed"
+ " concurrently.",
"2.0.0"),
TSERV_SUMMARY_REMOTE_THREADS("tserver.summary.remote.threads", "128", PropertyType.COUNT,
"For a partitioned group of 100k RFiles, those files are grouped by"
+ " tablet server. Then a remote tablet server is asked to gather summary"
+ " data. This setting determines how many concurrent request are made per"
+ " partition.",
"2.0.0"),
TSERV_SUMMARY_RETRIEVAL_THREADS("tserver.summary.retrieval.threads", "10", PropertyType.COUNT,
"The number of threads on each tablet server available to retrieve"
+ " summary data, that is not currently in cache, from RFiles.",
"2.0.0"),
TSERV_LAST_LOCATION_MODE("tserver.last.location.mode", "compaction",
PropertyType.LAST_LOCATION_MODE,
"Describes how the system will record the 'last' location for tablets, which can be used for"
+ " assigning them when a cluster restarts. If 'compaction' is the mode, then the system"
+ " will record the location where the tablet's most recent compaction occurred. If"
+ " 'assignment' is the mode, then the most recently assigned location will be recorded."
+ " The manager.startup.tserver properties might also need to be set to ensure the"
+ " tserver is available before tablets are initially assigned if the 'last' location is"
+ " to be used.",
"2.1.1"),
// accumulo garbage collector properties
GC_PREFIX("gc.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the accumulo garbage collector.",
"1.3.5"),
GC_CANDIDATE_BATCH_SIZE("gc.candidate.batch.size", "50%", PropertyType.MEMORY,
"The amount of memory used as the batch size for garbage collection.", "2.1.0"),
GC_CYCLE_START("gc.cycle.start", "30s", PropertyType.TIMEDURATION,
"Time to wait before attempting to garbage collect any old RFiles or write-ahead logs.",
"1.3.5"),
GC_CYCLE_DELAY("gc.cycle.delay", "5m", PropertyType.TIMEDURATION,
"Time between garbage collection cycles. In each cycle, old RFiles or write-ahead logs "
+ "no longer in use are removed from the filesystem.",
"1.3.5"),
GC_PORT("gc.port.client", "9998", PropertyType.PORT,
"The listening port for the garbage collector's monitor service.", "1.3.5"),
GC_DELETE_THREADS("gc.threads.delete", "16", PropertyType.COUNT,
"The number of threads used to delete RFiles and write-ahead logs.", "1.3.5"),
@Experimental
GC_REMOVE_IN_USE_CANDIDATES("gc.remove.in.use.candidates", "true", PropertyType.BOOLEAN,
"GC will remove deletion candidates that are in-use from the metadata location. "
+ "This is expected to increase the speed of subsequent GC runs.",
"2.1.3"),
GC_SAFEMODE("gc.safemode", "false", PropertyType.BOOLEAN,
"Provides listing of files to be deleted but does not delete any files.", "2.1.0"),
GC_USE_FULL_COMPACTION("gc.post.metadata.action", "flush", PropertyType.GC_POST_ACTION,
"When the gc runs it can make a lot of changes to the metadata, on completion, "
+ " to force the changes to be written to disk, the metadata and root tables can be flushed"
+ " and possibly compacted. Legal values are: compact - which both flushes and compacts the"
+ " metadata; flush - which flushes only (compactions may be triggered if required); or none.",
"1.10.0"),
// properties that are specific to the monitor server behavior
MONITOR_PREFIX("monitor.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the monitor web server.", "1.3.5"),
MONITOR_PORT("monitor.port.client", "9995", PropertyType.PORT,
"The listening port for the monitor's http service.", "1.3.5"),
MONITOR_SSL_KEYSTORE("monitor.ssl.keyStore", "", PropertyType.PATH,
"The keystore for enabling monitor SSL.", "1.5.0"),
@Sensitive
MONITOR_SSL_KEYSTOREPASS("monitor.ssl.keyStorePassword", "", PropertyType.STRING,
"The keystore password for enabling monitor SSL.", "1.5.0"),
MONITOR_SSL_KEYSTORETYPE("monitor.ssl.keyStoreType", "jks", PropertyType.STRING,
"Type of SSL keystore.", "1.7.0"),
@Sensitive
MONITOR_SSL_KEYPASS("monitor.ssl.keyPassword", "", PropertyType.STRING,
"Optional: the password for the private key in the keyStore. When not provided, this "
+ "defaults to the keystore password.",
"1.9.3"),
MONITOR_SSL_TRUSTSTORE("monitor.ssl.trustStore", "", PropertyType.PATH,
"The truststore for enabling monitor SSL.", "1.5.0"),
@Sensitive
MONITOR_SSL_TRUSTSTOREPASS("monitor.ssl.trustStorePassword", "", PropertyType.STRING,
"The truststore password for enabling monitor SSL.", "1.5.0"),
MONITOR_SSL_TRUSTSTORETYPE("monitor.ssl.trustStoreType", "jks", PropertyType.STRING,
"Type of SSL truststore.", "1.7.0"),
// Allow-list of SSL ciphers for the monitor; see the exclude property for the deny-list.
// Fixed description typo: "allows" -> "allowed".
MONITOR_SSL_INCLUDE_CIPHERS("monitor.ssl.include.ciphers", "", PropertyType.STRING,
    "A comma-separated list of allowed SSL Ciphers, see"
        + " monitor.ssl.exclude.ciphers to disallow ciphers.",
    "1.6.1"),
MONITOR_SSL_EXCLUDE_CIPHERS("monitor.ssl.exclude.ciphers", "", PropertyType.STRING,
"A comma-separated list of disallowed SSL Ciphers, see"
+ " monitor.ssl.include.ciphers to allow ciphers.",
"1.6.1"),
// Allowed TLS protocol versions for the monitor. Fixed description typo:
// "comma-separate" -> "comma-separated".
MONITOR_SSL_INCLUDE_PROTOCOLS("monitor.ssl.include.protocols", "TLSv1.3", PropertyType.STRING,
    "A comma-separated list of allowed SSL protocols.", "1.5.3"),
MONITOR_LOCK_CHECK_INTERVAL("monitor.lock.check.interval", "5s", PropertyType.TIMEDURATION,
"The amount of time to sleep between checking for the Monitor ZooKeeper lock.", "1.5.1"),
MONITOR_RESOURCES_EXTERNAL("monitor.resources.external", "", PropertyType.JSON,
"A JSON Map of Strings. Each String should be an HTML tag of an external"
+ " resource (JS or CSS) to be imported by the Monitor. Be sure to wrap"
+ " with CDATA tags. If this value is set, all of the external resources"
+ " in the `<head>` tag of the Monitor will be replaced with the tags set here."
+ " Be sure the jquery tag is first since other scripts will depend on it."
+ " The resources that are used by default can be seen in"
+ " `accumulo/server/monitor/src/main/resources/templates/default.ftl`.",
"2.0.0"),
// per table properties
TABLE_PREFIX("table.", null, PropertyType.PREFIX,
"Properties in this category affect tablet server treatment of tablets,"
+ " but can be configured on a per-table basis. Setting these properties in"
+ " accumulo.properties will override the default globally for all tables and not"
+ " any specific table. However, both the default and the global setting can"
+ " be overridden per table using the table operations API or in the shell,"
+ " which sets the overridden value in zookeeper. Restarting accumulo tablet"
+ " servers after setting these properties in accumulo.properties will cause the"
+ " global setting to take effect. However, you must use the API or the shell"
+ " to change properties in zookeeper that are set on a table.",
"1.3.5"),
TABLE_ARBITRARY_PROP_PREFIX("table.custom.", null, PropertyType.PREFIX,
"Prefix to be used for user defined arbitrary properties.", "1.7.0"),
// Fixed string concatenation that was missing a space, which rendered as
// "will callFSDataOutputStream..." in the generated documentation.
TABLE_MINC_OUTPUT_DROP_CACHE("table.compaction.minor.output.drop.cache", "false",
    PropertyType.BOOLEAN,
    "Setting this property to true will call"
        + " FSDataOutputStream.setDropBehind(true) on the minor compaction output stream.",
    "2.1.1"),
// Fixed string concatenation that was missing a space, which rendered as
// "will callFSDataOutputStream..." in the generated documentation.
TABLE_MAJC_OUTPUT_DROP_CACHE("table.compaction.major.output.drop.cache", "false",
    PropertyType.BOOLEAN,
    "Setting this property to true will call"
        + " FSDataOutputStream.setDropBehind(true) on the major compaction output stream.",
    "2.1.1"),
TABLE_MAJC_RATIO("table.compaction.major.ratio", "3", PropertyType.FRACTION,
"Minimum ratio of total input size to maximum input RFile size for"
+ " running a major compaction.",
"1.3.5"),
TABLE_SPLIT_THRESHOLD("table.split.threshold", "1G", PropertyType.BYTES,
"A tablet is split when the combined size of RFiles exceeds this amount.", "1.3.5"),
TABLE_MAX_END_ROW_SIZE("table.split.endrow.size.max", "10k", PropertyType.BYTES,
"Maximum size of end row.", "1.7.0"),
TABLE_MINC_COMPACT_IDLETIME("table.compaction.minor.idle", "5m", PropertyType.TIMEDURATION,
"After a tablet has been idle (no mutations) for this time period it may have its "
+ "in-memory map flushed to disk in a minor compaction. There is no guarantee an idle "
+ "tablet will be compacted.",
"1.3.5"),
TABLE_COMPACTION_DISPATCHER("table.compaction.dispatcher",
SimpleCompactionDispatcher.class.getName(), PropertyType.CLASSNAME,
"A configurable dispatcher that decides what compaction service a table should use.",
"2.1.0"),
TABLE_COMPACTION_DISPATCHER_OPTS("table.compaction.dispatcher.opts.", null, PropertyType.PREFIX,
"Options for the table compaction dispatcher.", "2.1.0"),
TABLE_COMPACTION_SELECTION_EXPIRATION("table.compaction.selection.expiration.ms", "2m",
PropertyType.TIMEDURATION,
"User compactions select files and are then queued for compaction, preventing these files "
+ "from being used in system compactions. This timeout allows system compactions to cancel "
+ "the hold queued user compactions have on files, when its queued for more than the "
+ "specified time. If a system compaction cancels a hold and runs, then the user compaction"
+ " can reselect and hold files after the system compaction runs.",
"2.1.0"),
TABLE_COMPACTION_SELECTOR("table.compaction.selector", "", PropertyType.CLASSNAME,
"A configurable selector for a table that can periodically select file for mandatory "
+ "compaction, even if the files do not meet the compaction ratio.",
"2.1.0"),
// Fixed copy-paste error: this is the options prefix for the compaction *selector*
// (table.compaction.selector), not the dispatcher.
TABLE_COMPACTION_SELECTOR_OPTS("table.compaction.selector.opts.", null, PropertyType.PREFIX,
    "Options for the table compaction selector.", "2.1.0"),
TABLE_COMPACTION_CONFIGURER("table.compaction.configurer", "", PropertyType.CLASSNAME,
"A plugin that can dynamically configure compaction output files based on input files.",
"2.1.0"),
// Fixed description typo: "configuror" -> "configurer" (matches the property name and the
// sibling TABLE_COMPACTION_CONFIGURER constant).
TABLE_COMPACTION_CONFIGURER_OPTS("table.compaction.configurer.opts.", null, PropertyType.PREFIX,
    "Options for the table compaction configurer.", "2.1.0"),
// Crypto-related properties
@Experimental
TABLE_CRYPTO_PREFIX("table.crypto.opts.", null, PropertyType.PREFIX,
"Properties related to on-disk file encryption.", "2.1.0"),
@Experimental
@Sensitive
TABLE_CRYPTO_SENSITIVE_PREFIX("table.crypto.opts.sensitive.", null, PropertyType.PREFIX,
"Sensitive properties related to on-disk file encryption.", "2.1.0"),
TABLE_SCAN_DISPATCHER("table.scan.dispatcher", SimpleScanDispatcher.class.getName(),
PropertyType.CLASSNAME,
"This class is used to dynamically dispatch scans to configured scan executors. Configured "
+ "classes must implement {% jlink " + ScanDispatcher.class.getName() + " %}. See "
+ "[scan executors]({% durl administration/scan-executors %}) for an overview of why"
+ " and how to use this property. This property is ignored for the root and metadata"
+ " table. The metadata table always dispatches to a scan executor named `meta`.",
"2.0.0"),
TABLE_SCAN_DISPATCHER_OPTS("table.scan.dispatcher.opts.", null, PropertyType.PREFIX,
"Options for the table scan dispatcher.", "2.0.0"),
TABLE_SCAN_MAXMEM("table.scan.max.memory", "512k", PropertyType.BYTES,
"The maximum amount of memory that will be used to cache results of a client query/scan. "
+ "Once this limit is reached, the buffered data is sent to the client.",
"1.3.5"),
TABLE_FILE_TYPE("table.file.type", RFile.EXTENSION, PropertyType.FILENAME_EXT,
"Change the type of file a table writes.", "1.3.5"),
TABLE_LOAD_BALANCER("table.balancer", "org.apache.accumulo.core.spi.balancer.SimpleLoadBalancer",
PropertyType.STRING,
"This property can be set to allow the LoadBalanceByTable load balancer"
+ " to change the called Load Balancer for this table.",
"1.3.5"),
TABLE_FILE_COMPRESSION_TYPE("table.file.compress.type", "gz", PropertyType.STRING,
"Compression algorithm used on index and data blocks before they are"
+ " written. Possible values: zstd, gz, snappy, bzip2, lzo, lz4, none.",
"1.3.5"),
TABLE_FILE_COMPRESSED_BLOCK_SIZE("table.file.compress.blocksize", "100k", PropertyType.BYTES,
"The maximum size of data blocks in RFiles before they are compressed and written.", "1.3.5"),
TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX("table.file.compress.blocksize.index", "128k",
PropertyType.BYTES,
"The maximum size of index blocks in RFiles before they are compressed and written.",
"1.4.0"),
TABLE_FILE_BLOCK_SIZE("table.file.blocksize", "0B", PropertyType.BYTES,
"The HDFS block size used when writing RFiles. When set to 0B, the"
+ " value/defaults of HDFS property 'dfs.block.size' will be used.",
"1.3.5"),
TABLE_FILE_REPLICATION("table.file.replication", "0", PropertyType.COUNT,
"The number of replicas for a table's RFiles in HDFS. When set to 0, HDFS"
+ " defaults are used.",
"1.3.5"),
TABLE_FILE_MAX("table.file.max", "15", PropertyType.COUNT,
"The maximum number of RFiles each tablet in a table can have. When"
+ " adjusting this property you may want to consider adjusting"
+ " table.compaction.major.ratio also. Setting this property to 0 will make"
+ " it default to tserver.scan.files.open.max-1, this will prevent a tablet"
+ " from having more RFiles than can be opened. Setting this property low may"
+ " throttle ingest and increase query performance.",
"1.4.0"),
TABLE_FILE_SUMMARY_MAX_SIZE("table.file.summary.maxSize", "256k", PropertyType.BYTES,
"The maximum size summary that will be stored. The number of RFiles that"
+ " had summary data exceeding this threshold is reported by"
+ " Summary.getFileStatistics().getLarge(). When adjusting this consider the"
+ " expected number RFiles with summaries on each tablet server and the"
+ " summary cache size.",
"2.0.0"),
TABLE_BLOOM_ENABLED("table.bloom.enabled", "false", PropertyType.BOOLEAN,
"Use bloom filters on this table.", "1.3.5"),
TABLE_BLOOM_LOAD_THRESHOLD("table.bloom.load.threshold", "1", PropertyType.COUNT,
"This number of seeks that would actually use a bloom filter must occur"
+ " before a RFile's bloom filter is loaded. Set this to zero to initiate"
+ " loading of bloom filters when a RFile is opened.",
"1.3.5"),
TABLE_BLOOM_SIZE("table.bloom.size", "1048576", PropertyType.COUNT,
"Bloom filter size, as number of keys.", "1.3.5"),
TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION,
"Bloom filter error rate.", "1.3.5"),
TABLE_BLOOM_KEY_FUNCTOR("table.bloom.key.functor",
"org.apache.accumulo.core.file.keyfunctor.RowFunctor", PropertyType.CLASSNAME,
"A function that can transform the key prior to insertion and check of"
+ " bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
+ " org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and"
+ " org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are"
+ " allowable values. One can extend any of the above mentioned classes to"
+ " perform specialized parsing of the key.",
"1.3.5"),
TABLE_BLOOM_HASHTYPE("table.bloom.hash.type", "murmur", PropertyType.STRING,
"The bloom filter hash type.", "1.3.5"),
TABLE_BULK_MAX_TABLETS("table.bulk.max.tablets", "0", PropertyType.COUNT,
"The maximum number of tablets allowed for one bulk import file. Value of 0 is Unlimited. "
+ "This property is only enforced in the new bulk import API.",
"2.1.0"),
TABLE_DURABILITY("table.durability", "sync", PropertyType.DURABILITY,
"The durability used to write to the write-ahead log. Legal values are:"
+ " none, which skips the write-ahead log; log, which sends the data to the"
+ " write-ahead log, but does nothing to make it durable; flush, which pushes"
+ " data to the file system; and sync, which ensures the data is written to disk.",
"1.7.0"),
TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN,
"If you want queries for your table to hang or fail when data is missing"
+ " from the system, then set this to false. When this set to true missing"
+ " data will be reported but queries will still run possibly returning a"
+ " subset of the data.",
"1.3.5"),
TABLE_DEFAULT_SCANTIME_VISIBILITY("table.security.scan.visibility.default", "",
PropertyType.STRING,
"The security label that will be assumed at scan time if an entry does"
+ " not have a visibility expression.\n"
+ "Note: An empty security label is displayed as []. The scan results"
+ " will show an empty visibility even if the visibility from this"
+ " setting is applied to the entry.\n"
+ "CAUTION: If a particular key has an empty security label AND its"
+ " table's default visibility is also empty, access will ALWAYS be"
+ " granted for users with permission to that table. Additionally, if this"
+ " field is changed, all existing data with an empty visibility label"
+ " will be interpreted with the new label on the next scan.",
"1.3.5"),
TABLE_LOCALITY_GROUPS("table.groups.enabled", "", PropertyType.STRING,
"A comma separated list of locality group names to enable for this table.", "1.3.5"),
TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX,
"Properties in this category are per-table properties that add"
+ " constraints to a table. These properties start with the category"
+ " prefix, followed by a number, and their values correspond to a fully"
+ " qualified Java class that implements the Constraint interface.\nFor example:\n"
+ "table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
+ "and:\n table.constraint.2 = my.package.constraints.MySecondConstraint.",
"1.3.5"),
TABLE_INDEXCACHE_ENABLED("table.cache.index.enable", "true", PropertyType.BOOLEAN,
"Determines whether index block cache is enabled for a table.", "1.3.5"),
TABLE_BLOCKCACHE_ENABLED("table.cache.block.enable", "false", PropertyType.BOOLEAN,
"Determines whether data block cache is enabled for a table.", "1.3.5"),
TABLE_ITERATOR_PREFIX("table.iterator.", null, PropertyType.PREFIX,
"Properties in this category specify iterators that are applied at"
+ " various stages (scopes) of interaction with a table. These properties"
+ " start with the category prefix, followed by a scope (minc, majc, scan,"
+ " etc.), followed by a period, followed by a name, as in"
+ " table.iterator.scan.vers, or table.iterator.scan.custom. The values for"
+ " these properties are a number indicating the ordering in which it is"
+ " applied, and a class name such as:\n"
+ "table.iterator.scan.vers = 10,org.apache.accumulo.core.iterators.VersioningIterator\n"
+ "These iterators can take options if additional properties are set that"
+ " look like this property, but are suffixed with a period, followed by 'opt'"
+ " followed by another period, and a property name.\n"
+ "For example, table.iterator.minc.vers.opt.maxVersions = 3.",
"1.3.5"),
TABLE_ITERATOR_SCAN_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.scan.name() + ".", null,
PropertyType.PREFIX, "Convenience prefix to find options for the scan iterator scope.",
"1.5.2"),
TABLE_ITERATOR_MINC_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.minc.name() + ".", null,
PropertyType.PREFIX, "Convenience prefix to find options for the minc iterator scope.",
"1.5.2"),
TABLE_ITERATOR_MAJC_PREFIX(TABLE_ITERATOR_PREFIX.getKey() + IteratorScope.majc.name() + ".", null,
PropertyType.PREFIX, "Convenience prefix to find options for the majc iterator scope.",
"1.5.2"),
TABLE_LOCALITY_GROUP_PREFIX("table.group.", null, PropertyType.PREFIX,
"Properties in this category are per-table properties that define"
+ " locality groups in a table. These properties start with the category"
+ " prefix, followed by a name, followed by a period, and followed by a"
+ " property for that group.\n"
+ "For example table.group.group1=x,y,z sets the column families for a"
+ " group called group1. Once configured, group1 can be enabled by adding"
+ " it to the list of groups in the " + TABLE_LOCALITY_GROUPS.getKey() + " property.\n"
+ "Additional group options may be specified for a named group by setting"
+ " `table.group.<name>.opt.<key>=<value>`.",
"1.3.5"),
TABLE_FORMATTER_CLASS("table.formatter", DefaultFormatter.class.getName(), PropertyType.STRING,
"The Formatter class to apply on results in the shell.", "1.4.0"),
TABLE_CLASSLOADER_CONTEXT("table.class.loader.context", "", PropertyType.STRING,
"The context to use for loading per-table resources, such as iterators"
+ " from the configured factory in `general.context.class.loader.factory`.",
"2.1.0"),
// Fixed grammar in the description: "can useful" -> "can be useful".
// NOTE(review): the class path "org.apache.accumulo.core.Sampler" in this description looks
// stale — the sampler SPI lives under a client.sample package; confirm against the Sampler
// interface before changing the doc string further.
TABLE_SAMPLER("table.sampler", "", PropertyType.CLASSNAME,
    "The name of a class that implements org.apache.accumulo.core.Sampler."
        + " Setting this option enables storing a sample of data which can be"
        + " scanned. Always having a current sample can be useful for query optimization"
        + " and data comprehension. After enabling sampling for an existing table,"
        + " a compaction is needed to compute the sample for existing data. The"
        + " compact command in the shell has an option to only compact RFiles without"
        + " sample data.",
    "1.8.0"),
// Fixed description typos: "a sample had" -> "a sampler had", and "modulous" -> "modulus"
// (the option names are illustrative examples, not real keys, so correcting the spelling
// does not affect any configured property).
TABLE_SAMPLER_OPTS("table.sampler.opt.", null, PropertyType.PREFIX,
    "The property is used to set options for a sampler. If a sampler had two"
        + " options like hasher and modulus, then the two properties"
        + " table.sampler.opt.hasher=${hash algorithm} and"
        + " table.sampler.opt.modulus=${mod} would be set.",
    "1.8.0"),
TABLE_SUSPEND_DURATION("table.suspend.duration", "0s", PropertyType.TIMEDURATION,
"For tablets belonging to this table: When a tablet server dies, allow"
+ " the tablet server this duration to revive before reassigning its tablets"
+ " to other tablet servers.",
"1.8.0"),
TABLE_SUMMARIZER_PREFIX("table.summarizer.", null, PropertyType.PREFIX,
"Prefix for configuring summarizers for a table. Using this prefix"
+ " multiple summarizers can be configured with options for each one. Each"
+ " summarizer configured should have a unique id, this id can be anything."
+ " To add a summarizer set "
+ "`table.summarizer.<unique id>=<summarizer class name>.` If the summarizer has options"
+ ", then for each option set `table.summarizer.<unique id>.opt.<key>=<value>`.",
"2.0.0"),
@Experimental
TABLE_DELETE_BEHAVIOR("table.delete.behavior",
DeletingIterator.Behavior.PROCESS.name().toLowerCase(), PropertyType.STRING,
"This determines what action to take when a delete marker is seen."
+ " Valid values are `process` and `fail` with `process` being the default. When set to "
+ "`process`, deletes will suppress data. When set to `fail`, any deletes seen will cause"
+ " an exception. The purpose of `fail` is to support tables that never delete data and"
+ " need fast seeks within the timestamp range of a column. When setting this to fail, "
+ "also consider configuring the `" + NoDeleteConstraint.class.getName() + "` "
+ "constraint.",
"2.0.0"),
// Compactor properties
@Experimental
COMPACTOR_PREFIX("compactor.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the accumulo compactor server.", "2.1.0"),
@Experimental
COMPACTOR_PORTSEARCH("compactor.port.search", "false", PropertyType.BOOLEAN,
"If the compactor.port.client is in use, search higher ports until one is available.",
"2.1.0"),
@Experimental
COMPACTOR_CLIENTPORT("compactor.port.client", "9133", PropertyType.PORT,
"The port used for handling client connections on the compactor servers.", "2.1.0"),
@Experimental
COMPACTOR_MINTHREADS("compactor.threads.minimum", "1", PropertyType.COUNT,
"The minimum number of threads to use to handle incoming requests.", "2.1.0"),
@Experimental
COMPACTOR_MINTHREADS_TIMEOUT("compactor.threads.timeout", "0s", PropertyType.TIMEDURATION,
"The time after which incoming request threads terminate with no work available. Zero (0) will keep the threads alive indefinitely.",
"2.1.0"),
@Experimental
COMPACTOR_THREADCHECK("compactor.threadcheck.time", "1s", PropertyType.TIMEDURATION,
"The time between adjustments of the server thread pool.", "2.1.0"),
@Experimental
COMPACTOR_MAX_MESSAGE_SIZE("compactor.message.size.max", "10M", PropertyType.BYTES,
"The maximum size of a message that can be sent to a tablet server.", "2.1.0"),
@Experimental
COMPACTOR_QUEUE_NAME("compactor.queue", "", PropertyType.STRING,
"The queue for which this Compactor will perform compactions.", "3.0.0"),
// CompactionCoordinator properties
@Experimental
COMPACTION_COORDINATOR_PREFIX("compaction.coordinator.", null, PropertyType.PREFIX,
"Properties in this category affect the behavior of the accumulo compaction coordinator server.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_THRIFTCLIENT_PORTSEARCH("compaction.coordinator.port.search", "false",
PropertyType.BOOLEAN,
"If the ports above are in use, search higher ports until one is available.", "2.1.0"),
@Experimental
COMPACTION_COORDINATOR_CLIENTPORT("compaction.coordinator.port.client", "9132", PropertyType.PORT,
"The port used for handling Thrift client connections on the compaction coordinator server.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_MINTHREADS("compaction.coordinator.threads.minimum", "1",
PropertyType.COUNT, "The minimum number of threads to use to handle incoming requests.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_MINTHREADS_TIMEOUT("compaction.coordinator.threads.timeout", "0s",
PropertyType.TIMEDURATION,
"The time after which incoming request threads terminate with no work available. Zero (0) will keep the threads alive indefinitely.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_THREADCHECK("compaction.coordinator.threadcheck.time", "1s",
PropertyType.TIMEDURATION, "The time between adjustments of the server thread pool.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_MAX_MESSAGE_SIZE("compaction.coordinator.message.size.max", "10M",
PropertyType.BYTES, "The maximum size of a message that can be sent to a tablet server.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_DEAD_COMPACTOR_CHECK_INTERVAL(
"compaction.coordinator.compactor.dead.check.interval", "5m", PropertyType.TIMEDURATION,
"The interval at which to check for dead compactors.", "2.1.0"),
@Experimental
COMPACTION_COORDINATOR_FINALIZER_TSERVER_NOTIFIER_MAXTHREADS(
"compaction.coordinator.compaction.finalizer.threads.maximum", "5", PropertyType.COUNT,
"The maximum number of threads to use for notifying tablet servers that an external compaction has completed.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_FINALIZER_COMPLETION_CHECK_INTERVAL(
"compaction.coordinator.compaction.finalizer.check.interval", "60s",
PropertyType.TIMEDURATION,
"The interval at which to check for external compaction final state markers in the metadata table.",
"2.1.0"),
@Experimental
COMPACTION_COORDINATOR_TSERVER_COMPACTION_CHECK_INTERVAL(
"compaction.coordinator.tserver.check.interval", "1m", PropertyType.TIMEDURATION,
"The interval at which to check the tservers for external compactions.", "2.1.0");
private final String key;
private final String defaultValue;
private final String description;
private String deprecatedSince;
private final String availableSince;
private boolean annotationsComputed = false;
private boolean isSensitive;
private boolean isDeprecated;
private boolean isExperimental;
private boolean isReplaced;
private Property replacedBy = null;
private final PropertyType type;
Property(String name, String defaultValue, PropertyType type, String description,
String availableSince) {
this.key = name;
this.defaultValue = defaultValue;
this.description = description;
this.availableSince = availableSince;
this.type = type;
}
@Override
public String toString() {
  // A property's string form is simply its configuration key.
  return getKey();
}
/**
* Gets the key (string) for this property.
*
* @return key
*/
public String getKey() {
return this.key;
}
/**
* Gets the default value for this property. System properties are interpolated into the value if
* necessary.
*
* @return default value
*/
public String getDefaultValue() {
return this.defaultValue;
}
/**
* Gets the type of this property.
*
* @return property type
*/
public PropertyType getType() {
return this.type;
}
/**
* Gets the description of this property.
*
* @return description
*/
public String getDescription() {
return this.description;
}
/**
* Checks if this property is experimental.
*
* @return true if this property is experimental
*/
public boolean isExperimental() {
Preconditions.checkState(annotationsComputed,
"precomputeAnnotations() must be called before calling this method");
return isExperimental;
}
/**
* Checks if this property is deprecated.
*
* @return true if this property is deprecated
*/
public boolean isDeprecated() {
Preconditions.checkState(annotationsComputed,
"precomputeAnnotations() must be called before calling this method");
return isDeprecated;
}
/**
* Gets the version in which the property was deprecated.
*
* @return Accumulo Version
*/
public String deprecatedSince() {
Preconditions.checkState(annotationsComputed,
"precomputeAnnotations() must be called before calling this method");
return deprecatedSince;
}
/**
* Gets the version in which the property was introduced.
*
* @return Accumulo Version
*/
public String availableSince() {
return this.availableSince;
}
/**
* Checks if this property is sensitive.
*
* @return true if this property is sensitive
*/
public boolean isSensitive() {
Preconditions.checkState(annotationsComputed,
"precomputeAnnotations() must be called before calling this method");
return isSensitive;
}
/**
* Checks if this property is replaced.
*
* @return true if this property is replaced
*/
public boolean isReplaced() {
Preconditions.checkState(annotationsComputed,
"precomputeAnnotations() must be called before calling this method");
return isReplaced;
}
/**
* Gets the property in which the tagged property is replaced by.
*
* @return replacedBy
*/
public Property replacedBy() {
Preconditions.checkState(annotationsComputed,
"precomputeAnnotations() must be called before calling this method");
return replacedBy;
}
// Resolves annotation-driven metadata (@Sensitive, @Deprecated, @Experimental,
// @ReplacedBy) exactly once per constant so the is*() accessors do not pay
// reflection costs on every call. Must run only after propertiesByKey and
// validPrefixes are fully populated (see the static initializer), because
// hasPrefixWithAnnotation consults those collections.
private void precomputeAnnotations() {
  // A property is sensitive/deprecated/etc. if either the constant itself is
  // annotated or any prefix property covering its key is annotated.
  isSensitive =
      hasAnnotation(Sensitive.class) || hasPrefixWithAnnotation(getKey(), Sensitive.class);
  isDeprecated =
      hasAnnotation(Deprecated.class) || hasPrefixWithAnnotation(getKey(), Deprecated.class);
  Deprecated dep = getAnnotation(Deprecated.class);
  if (dep != null) {
    deprecatedSince = dep.since();
  }
  isExperimental =
      hasAnnotation(Experimental.class) || hasPrefixWithAnnotation(getKey(), Experimental.class);
  isReplaced =
      hasAnnotation(ReplacedBy.class) || hasPrefixWithAnnotation(getKey(), ReplacedBy.class);
  ReplacedBy rb = getAnnotation(ReplacedBy.class);
  if (rb != null) {
    replacedBy = rb.property();
  }
  // Guard flag checked by the Preconditions in each accessor above.
  annotationsComputed = true;
}
/**
 * Checks if a property with the given key is sensitive. The key must be for a valid property, and
 * must either itself be annotated as sensitive or have a prefix annotated as sensitive.
 *
 * @param key property key
 * @return true if property is sensitive
 */
public static boolean isSensitive(String key) {
  // An exact match decides directly; otherwise any sensitive prefix of the
  // key makes it sensitive.
  Property exact = propertiesByKey.get(key);
  if (exact != null) {
    return exact.isSensitive();
  }
  for (String prefix : validPrefixes) {
    if (key.startsWith(prefix) && propertiesByKey.get(prefix).isSensitive()) {
      return true;
    }
  }
  return false;
}
private <T extends Annotation> boolean hasAnnotation(Class<T> annotationType) {
return getAnnotation(annotationType) != null;
}
// Reflectively looks up an annotation on this enum constant's backing field
// (enum constants are public static final fields of the enum class). Returns
// null if the annotation is absent or the field cannot be accessed; callers
// treat null as "no annotation". Failures are logged rather than propagated.
private <T extends Annotation> T getAnnotation(Class<T> annotationType) {
  try {
    return getClass().getField(name()).getAnnotation(annotationType);
  } catch (SecurityException | NoSuchFieldException e) {
    LoggerFactory.getLogger(getClass()).error("{}", e.getMessage(), e);
  }
  return null;
}
// True if any recognized prefix of the given key carries the annotation.
private static <T extends Annotation> boolean hasPrefixWithAnnotation(String key,
    Class<T> annotationType) {
  return validPrefixes.stream().filter(key::startsWith).map(propertiesByKey::get)
      .anyMatch(prop -> prop.hasAnnotation(annotationType));
}
private static final HashSet<String> validTableProperties = new HashSet<>();
private static final HashSet<String> validProperties = new HashSet<>();
private static final HashSet<String> validPrefixes = new HashSet<>();
private static final HashMap<String,Property> propertiesByKey = new HashMap<>();
/**
 * Checks if the given property and value are valid. A property is valid if the property key is
 * valid see {@link #isValidPropertyKey} and that the value is a valid format for the type see
 * {@link PropertyType#isValidFormat}.
 *
 * @param key property key
 * @param value property value
 * @return true if key is valid (recognized, or has a recognized prefix)
 */
public static boolean isValidProperty(final String key, final String value) {
  Property p = getPropertyByKey(key);
  if (p != null) {
    // Known property: the key must be recognized and the value must parse
    // for the property's declared type.
    return isValidPropertyKey(key) && p.getType().isValidFormat(value);
  }
  // Unknown key: valid only if it extends some recognized prefix.
  for (String prefix : validPrefixes) {
    if (key.startsWith(prefix)) {
      return true;
    }
  }
  return false;
}
/**
 * Checks if the given property key is valid. A valid property key is either equal to the key of
 * some defined property or has a prefix matching some prefix defined in this class.
 *
 * @param key property key
 * @return true if key is valid (recognized, or has a recognized prefix)
 */
public static boolean isValidPropertyKey(String key) {
  if (validProperties.contains(key)) {
    return true;
  }
  // Not an exact match: accept the key if it extends a known prefix.
  return validPrefixes.stream().anyMatch(key::startsWith);
}
/**
 * Checks if the given property key is a valid property and is of type boolean.
 *
 * @param key property key
 * @return true if key is valid and is of type boolean, false otherwise
 */
public static boolean isValidBooleanPropertyKey(String key) {
  // Membership in validProperties guarantees getPropertyByKey(key) is non-null.
  if (!validProperties.contains(key)) {
    return false;
  }
  return getPropertyByKey(key).getType() == PropertyType.BOOLEAN;
}
/**
 * Checks if the given property key is for a valid table property. A valid table property key is
 * either equal to the key of some defined table property (which each start with
 * {@link #TABLE_PREFIX}) or has a prefix matching {@link #TABLE_CONSTRAINT_PREFIX},
 * {@link #TABLE_ITERATOR_PREFIX}, or {@link #TABLE_LOCALITY_GROUP_PREFIX}.
 *
 * @param key property key
 * @return true if key is valid for a table property (recognized, or has a recognized prefix)
 */
public static boolean isValidTablePropertyKey(String key) {
  // NOTE(review): by operator precedence the TABLE_CRYPTO_PREFIX clause below
  // is OR'd OUTSIDE the "starts with table. AND (...)" group. This is only
  // equivalent to being inside it if the crypto prefix itself begins with the
  // table prefix — confirm before relying on / rearranging the grouping.
  return validTableProperties.contains(key) || (key.startsWith(Property.TABLE_PREFIX.getKey())
      && (key.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())
          || key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey())
          || key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey())
          || key.startsWith(Property.TABLE_ARBITRARY_PROP_PREFIX.getKey())
          || key.startsWith(TABLE_SAMPLER_OPTS.getKey())
          || key.startsWith(TABLE_SUMMARIZER_PREFIX.getKey())
          || key.startsWith(TABLE_SCAN_DISPATCHER_OPTS.getKey())
          || key.startsWith(TABLE_COMPACTION_DISPATCHER_OPTS.getKey())
          || key.startsWith(TABLE_COMPACTION_CONFIGURER_OPTS.getKey())
          || key.startsWith(TABLE_COMPACTION_SELECTOR_OPTS.getKey()))
      || key.startsWith(TABLE_CRYPTO_PREFIX.getKey()));
}
public static final EnumSet<Property> fixedProperties = EnumSet.of(
// port options
GC_PORT, MANAGER_CLIENTPORT, TSERV_CLIENTPORT,
// tserver cache options
TSERV_CACHE_MANAGER_IMPL, TSERV_DATACACHE_SIZE, TSERV_INDEXCACHE_SIZE,
TSERV_SUMMARYCACHE_SIZE,
// others
TSERV_NATIVEMAP_ENABLED, TSERV_SCAN_MAX_OPENFILES, MANAGER_RECOVERY_WAL_EXISTENCE_CACHE_TIME);
/**
* Checks if the given property may be changed via Zookeeper, but not recognized until the restart
* of some relevant daemon.
*
* @param key property key
* @return true if property may be changed via Zookeeper but only heeded upon some restart
*/
public static boolean isFixedZooPropertyKey(Property key) {
return fixedProperties.contains(key);
}
/**
* Checks if the given property key is valid for a property that may be changed via Zookeeper.
*
* @param key property key
* @return true if key's property may be changed via Zookeeper
*/
public static boolean isValidZooPropertyKey(String key) {
// white list prefixes
return key.startsWith(Property.TABLE_PREFIX.getKey())
|| key.startsWith(Property.TSERV_PREFIX.getKey())
|| key.startsWith(Property.MANAGER_PREFIX.getKey())
|| key.startsWith(Property.GC_PREFIX.getKey())
|| key.startsWith(Property.GENERAL_ARBITRARY_PROP_PREFIX.getKey())
|| key.equals(Property.GENERAL_FILE_NAME_ALLOCATION_BATCH_SIZE_MIN.getKey())
|| key.equals(Property.GENERAL_FILE_NAME_ALLOCATION_BATCH_SIZE_MAX.getKey());
}
/**
* Gets a {@link Property} instance with the given key.
*
* @param key property key
* @return property, or null if not found
*/
public static Property getPropertyByKey(String key) {
return propertiesByKey.get(key);
}
/**
 * Checks if this property is expected to have a Java class as a value.
 *
 * @param key property key to examine
 * @return true if this is property is a class property
 */
// A constraint key names a class only at depth 1 past its prefix
// (table.constraint.<num>), and an iterator key only at depth 2
// (table.iterator.<scope>.<name>); anything deeper is an option key for the
// class, not a class name itself. The load balancer key is a class property
// by definition.
public static boolean isClassProperty(String key) {
  return (key.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())
      && key.substring(Property.TABLE_CONSTRAINT_PREFIX.getKey().length()).split("\\.").length
      == 1)
      || (key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey())
          && key.substring(Property.TABLE_ITERATOR_PREFIX.getKey().length()).split("\\.").length
          == 2)
      || key.equals(Property.TABLE_LOAD_BALANCER.getKey());
}
/**
 * Creates a new instance of a class specified in a configuration property. The table classpath
 * context is used if set.
 *
 * @param conf configuration containing property
 * @param property property specifying class name
 * @param base base class of type
 * @param defaultInstance instance to use if creation fails
 * @return new class instance, or default instance if creation failed
 */
public static <T> T createTableInstanceFromPropertyName(AccumuloConfiguration conf,
    Property property, Class<T> base, T defaultInstance) {
  String clazzName = conf.get(property);
  // Resolve against the per-table classloader context, if one is configured;
  // a null/empty context falls back to the default loader.
  String context = ClassLoaderUtil.tableContext(conf);
  return ConfigurationTypeHelper.getClassInstance(context, clazzName, base, defaultInstance);
}
/**
 * Creates a new instance of a class specified in a configuration property.
 *
 * @param conf configuration containing property
 * @param property property specifying class name
 * @param base base class of type
 * @param defaultInstance instance to use if creation fails
 * @return new class instance, or default instance if creation failed
 */
public static <T> T createInstanceFromPropertyName(AccumuloConfiguration conf, Property property,
    Class<T> base, T defaultInstance) {
  // No classloader context here: resolve against the default loader.
  return ConfigurationTypeHelper.getClassInstance(null, conf.get(property), base,
      defaultInstance);
}
static {
  // Precomputing information here avoids :
  // * Computing it each time a method is called
  // * Using synch to compute the first time a method is called
  Predicate<Property> isPrefix = p -> p.getType() == PropertyType.PREFIX;
  Arrays.stream(Property.values())
      // record all properties by key (prefix properties included)
      .peek(p -> propertiesByKey.put(p.getKey(), p))
      // save all the prefix properties
      .peek(p -> {
        if (isPrefix.test(p)) {
          validPrefixes.add(p.getKey());
        }
      })
      // only use the keys for the non-prefix properties from here on
      .filter(isPrefix.negate()).map(Property::getKey)
      // everything left is a valid property
      .peek(validProperties::add)
      // but some are also valid table properties
      .filter(k -> k.startsWith(Property.TABLE_PREFIX.getKey()))
      .forEach(validTableProperties::add);
  // order is very important here the following code relies on the maps and sets populated above
  // (precomputeAnnotations reads propertiesByKey/validPrefixes via
  // hasPrefixWithAnnotation, so it must run last)
  Arrays.stream(Property.values()).forEach(Property::precomputeAnnotations);
}
}
| 9,945 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationCopy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.function.Predicate;
import java.util.stream.Stream;
/**
* An {@link AccumuloConfiguration} which holds a flat copy of properties defined in another
* configuration
*/
public class ConfigurationCopy extends AccumuloConfiguration {
  // Monotonically increasing count of mutations made through set(); reads and
  // writes are guarded by the monitor of {@code copy} so the count stays
  // consistent with the map contents.
  private long updateCount = 0;
  // Flat property snapshot; wrapped so individual gets/puts are thread-safe
  // without external locking. Iteration still requires manual sync (below).
  final Map<String,String> copy = Collections.synchronizedMap(new HashMap<>());

  /**
   * Creates a new configuration.
   *
   * @param config configuration property key/value pairs to copy
   */
  public ConfigurationCopy(Map<String,String> config) {
    this(config.entrySet());
  }

  /**
   * Creates a new configuration.
   *
   * @param config configuration property stream to use for copying
   */
  public ConfigurationCopy(Stream<Entry<String,String>> config) {
    this(config::iterator);
  }

  /**
   * Creates a new configuration.
   *
   * @param config configuration property iterable to use for copying
   */
  public ConfigurationCopy(Iterable<Entry<String,String>> config) {
    config.forEach(e -> copy.put(e.getKey(), e.getValue()));
  }

  /**
   * Creates a new empty configuration.
   */
  public ConfigurationCopy() {
    this(new HashMap<>());
  }

  @Override
  public String get(Property property) {
    return copy.get(property.getKey());
  }

  @Override
  public void getProperties(Map<String,String> props, Predicate<String> filter) {
    // FIX: the Collections.synchronizedMap contract requires holding the
    // map's monitor while iterating any of its views; without this block a
    // concurrent set() could throw ConcurrentModificationException or yield
    // an inconsistent snapshot.
    synchronized (copy) {
      for (Entry<String,String> entry : copy.entrySet()) {
        if (filter.test(entry.getKey())) {
          props.put(entry.getKey(), entry.getValue());
        }
      }
    }
  }

  /**
   * Sets a property in this configuration.
   *
   * @param prop property to set
   * @param value property value
   */
  public void set(Property prop, String value) {
    synchronized (copy) {
      copy.put(prop.getKey(), value);
      updateCount++;
    }
  }

  /**
   * Sets a property in this configuration.
   *
   * @param key key of property to set
   * @param value property value
   */
  public void set(String key, String value) {
    synchronized (copy) {
      copy.put(key, value);
      updateCount++;
    }
  }

  @Override
  public long getUpdateCount() {
    synchronized (copy) {
      return updateCount;
    }
  }

  @Override
  public boolean isPropertySet(Property prop) {
    return copy.containsKey(prop.getKey());
  }
}
| 9,946 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/HadoopCredentialProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Shim around Hadoop's CredentialProviderFactory provided by hadoop-common.
*/
public class HadoopCredentialProvider {
  private static final Logger log = LoggerFactory.getLogger(HadoopCredentialProvider.class);

  private static final String CREDENTIAL_PROVIDER_PATH = "hadoop.security.credential.provider.path";

  // Providers cached per provider-path string; ConcurrentHashMap handles
  // concurrent access. NOTE(review): the cache is keyed only by path, so a
  // Configuration with the same path but otherwise different settings reuses
  // the previously cached providers — confirm this is the intended behavior.
  private static final ConcurrentHashMap<String,List<CredentialProvider>> cachedProviders =
      new ConcurrentHashMap<>();

  /**
   * Set the Hadoop Credential Provider path in the provided Hadoop Configuration.
   *
   * @param conf the Hadoop Configuration object
   * @param path the credential provider paths to set
   */
  public static void setPath(Configuration conf, String path) {
    conf.set(CREDENTIAL_PROVIDER_PATH, path);
  }

  /**
   * Fetch/cache the configured providers.
   *
   * @return The List of CredentialProviders, or null if they could not be loaded
   */
  private static List<CredentialProvider> getProviders(Configuration conf) {
    String path = conf.get(CREDENTIAL_PROVIDER_PATH);
    if (path == null || path.isEmpty()) {
      log.debug("Failed to get CredentialProviders; no provider path specified");
      return null;
    }
    // FIX: load providers lazily inside computeIfAbsent so a cache hit skips
    // the (potentially expensive) factory call entirely; the previous version
    // always built a fresh provider list and then discarded it on a hit.
    // Returning null from the mapping function leaves no cache entry, so a
    // failed load is retried on the next call — same as before.
    return cachedProviders.computeIfAbsent(path, p -> {
      try {
        return CredentialProviderFactory.getProviders(conf);
      } catch (IOException e) {
        log.warn("Exception invoking CredentialProviderFactory.getProviders(conf)", e);
        return null;
      }
    });
  }

  /**
   * Attempt to extract the password from any configured CredentialProviders for the given alias. If
   * no providers or credential is found, null is returned.
   *
   * @param conf Configuration for CredentialProvider
   * @param alias Name of CredentialEntry key
   * @return The credential if found, null otherwise
   */
  public static char[] getValue(Configuration conf, String alias) {
    requireNonNull(alias);
    List<CredentialProvider> providerList = getProviders(requireNonNull(conf));
    return providerList == null ? null : providerList.stream().map(provider -> {
      try {
        return provider.getCredentialEntry(alias);
      } catch (IOException e) {
        log.warn("Failed to call getCredentialEntry(alias) for provider {}", provider, e);
        return null;
      }
    }).filter(Objects::nonNull).map(CredentialProvider.CredentialEntry::getCredential).findFirst()
        .orElseGet(() -> {
          // If we didn't find it, this isn't an error, it just wasn't set in the CredentialProvider
          log.trace("Could not extract credential for {} from providers", alias);
          return null;
        });
  }

  /**
   * Attempt to extract all aliases from any configured CredentialProviders.
   *
   * @param conf Configuration for the CredentialProvider
   * @return A list of aliases. An empty list if no CredentialProviders are configured, or the
   *         providers are empty.
   */
  public static List<String> getKeys(Configuration conf) {
    List<CredentialProvider> providerList = getProviders(requireNonNull(conf));
    return providerList == null ? Collections.emptyList()
        : providerList.stream().flatMap(provider -> {
          List<String> aliases = null;
          try {
            aliases = provider.getAliases();
          } catch (IOException e) {
            log.warn("Problem getting aliases from provider {}", provider, e);
          }
          return aliases == null ? Stream.empty() : aliases.stream();
        }).collect(Collectors.toList());
  }

  /**
   * Create a CredentialEntry using the configured Providers. If multiple CredentialProviders are
   * configured, the first will be used.
   *
   * @param conf Configuration for the CredentialProvider
   * @param name CredentialEntry name (alias)
   * @param credential The credential
   */
  public static void createEntry(Configuration conf, String name, char[] credential)
      throws IOException {
    requireNonNull(conf);
    requireNonNull(name);
    requireNonNull(credential);
    List<CredentialProvider> providers = getProviders(conf);
    if (providers == null || providers.isEmpty()) {
      throw new IOException("Could not fetch any CredentialProviders");
    }
    // Only the first provider is written to; warn when that choice is ambiguous.
    CredentialProvider provider = providers.get(0);
    if (providers.size() != 1) {
      log.warn("Found more than one CredentialProvider. Using first provider found ({})", provider);
    }
    provider.createCredentialEntry(name, credential);
    provider.flush();
  }
}
| 9,947 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/DeprecatedPropertyUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.util.Objects.requireNonNull;
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DeprecatedPropertyUtil {

  /**
   * Pairs a predicate that recognizes an old-style property key with a mapper
   * that rewrites it to its current name.
   */
  public static class PropertyRenamer {
    final Predicate<String> keyFilter;
    final UnaryOperator<String> keyMapper;

    public PropertyRenamer(Predicate<String> keyFilter, UnaryOperator<String> keyMapper) {
      this.keyFilter = requireNonNull(keyFilter);
      this.keyMapper = requireNonNull(keyMapper);
    }

    /** Builds a renamer that swaps {@code oldPrefix} for {@code newPrefix} on matching keys. */
    public static PropertyRenamer renamePrefix(String oldPrefix, String newPrefix) {
      return new PropertyRenamer(key -> key.startsWith(oldPrefix),
          key -> newPrefix + key.substring(oldPrefix.length()));
    }
  }

  private static final Logger log = LoggerFactory.getLogger(DeprecatedPropertyUtil.class);

  /**
   * Ordered list of renamers
   */
  protected static final List<PropertyRenamer> renamers = new ArrayList<>();

  /**
   * Checks if {@code propertyName} is a deprecated property name and return its replacement name,
   * if one is available, or the original name if no replacement is available. If a property has a
   * replacement that itself was replaced, this method will return the final recommended property,
   * after processing each replacement in order. If the final name has changed from the original
   * name, the logging action is triggered with a provided logger and the replacement name.
   * <p>
   * This is expected to be used only with system properties stored in the SiteConfiguration and
   * ZooConfiguration, and not for per-table or per-namespace configuration in ZooKeeper.
   *
   * @param propertyName the name of the potentially deprecated property to check for a replacement
   *        name
   * @param loggerActionOnReplace the action to execute, if not null, if a replacement name was
   *        found
   * @return either the replacement for {@code propertyName}, or {@code propertyName} if the
   *         property is not deprecated
   */
  public static String getReplacementName(final String propertyName,
      BiConsumer<Logger,String> loggerActionOnReplace) {
    String current = requireNonNull(propertyName);
    requireNonNull(loggerActionOnReplace);
    // Apply each renamer in registration order; later renamers may rewrite the
    // output of earlier ones (chained replacements).
    for (PropertyRenamer renamer : renamers) {
      current = renamer.keyFilter.test(current) ? renamer.keyMapper.apply(current) : current;
    }
    // Only notify the caller when the name actually changed.
    if (!current.equals(propertyName)) {
      loggerActionOnReplace.accept(log, current);
    }
    return current;
  }

  /**
   * @return The list of property renamers
   */
  public static List<PropertyRenamer> getPropertyRenamers() {
    return renamers;
  }
}
| 9,948 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.apache.commons.configuration2.AbstractConfiguration;
import org.apache.commons.configuration2.CompositeConfiguration;
import org.apache.commons.configuration2.MapConfiguration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* An {@link AccumuloConfiguration} which first loads any properties set on the command-line (using
* the -o option) and then from accumulo.properties. This implementation supports defaulting
* undefined property values to a parent configuration's definitions.
* <p>
* The system property "accumulo.properties" can be used to specify the location of the properties
* file on the classpath or filesystem if the path is prefixed with 'file://'. If the system
* property is not defined, it defaults to "accumulo.properties" and will look on classpath for
* file.
* <p>
* <b>Note</b>: Client code should not use this class, and it may be deprecated in the future.
*/
public class SiteConfiguration extends AccumuloConfiguration {

  private static final Logger log = LoggerFactory.getLogger(SiteConfiguration.class);

  // properties not defined here fall back to the compiled-in defaults
  private static final AccumuloConfiguration parent = DefaultConfiguration.getInstance();

  /** Terminal step of the fluent builder; produces the immutable configuration. */
  public interface Buildable {
    SiteConfiguration build();
  }

  /** Builder step that may layer command-line (-o) overrides on top of the file properties. */
  public interface OverridesOption extends Buildable {
    Buildable withOverrides(Map<String,String> overrides);
  }

  static class Builder implements OverridesOption, Buildable {

    // location of accumulo.properties; null means no properties file is read
    private URL url = null;

    // command-line overrides; defaults to none
    private Map<String,String> overrides = Collections.emptyMap();

    // visible to package-private for testing only
    Builder() {}

    // build a configuration backed by no properties file (see empty())
    private OverridesOption noFile() {
      return this;
    }

    // exists for testing only
    OverridesOption fromUrl(URL propertiesFileUrl) {
      url = requireNonNull(propertiesFileUrl);
      return this;
    }

    /**
     * Locates accumulo.properties from the "accumulo.properties" system property, interpreting it
     * either as a file:// URI or, otherwise, as a classpath resource name.
     *
     * @throws IllegalArgumentException if the file/resource cannot be found or the URI is invalid
     */
    public OverridesOption fromEnv() {
      String configFile = System.getProperty("accumulo.properties", "accumulo.properties");
      if (configFile.startsWith("file://")) {
        File f;
        try {
          f = new File(new URI(configFile));
        } catch (URISyntaxException e) {
          throw new IllegalArgumentException(
              "Failed to load Accumulo configuration from " + configFile, e);
        }
        if (f.exists() && !f.isDirectory()) {
          log.info("Found Accumulo configuration at {}", configFile);
          return fromFile(f);
        } else {
          throw new IllegalArgumentException(
              "Failed to load Accumulo configuration at " + configFile);
        }
      } else {
        // not a file URI: treat the value as a classpath resource name
        URL accumuloConfigUrl = SiteConfiguration.class.getClassLoader().getResource(configFile);
        if (accumuloConfigUrl == null) {
          throw new IllegalArgumentException(
              "Failed to load Accumulo configuration '" + configFile + "' from classpath");
        } else {
          log.info("Found Accumulo configuration on classpath at {}", accumuloConfigUrl.getFile());
          url = accumuloConfigUrl;
          return this;
        }
      }
    }

    /**
     * Reads properties from the given file location.
     *
     * @throws IllegalArgumentException if the file path cannot be converted to a URL
     */
    public OverridesOption fromFile(File propertiesFileLocation) {
      try {
        url = requireNonNull(propertiesFileLocation).toURI().toURL();
      } catch (MalformedURLException e) {
        throw new IllegalArgumentException(e);
      }
      return this;
    }

    @Override
    public Buildable withOverrides(Map<String,String> overrides) {
      this.overrides = requireNonNull(overrides);
      return this;
    }

    /**
     * Assembles the final configuration. Precedence (highest first): Hadoop credential provider
     * values for sensitive properties, then command-line overrides, then the properties file.
     * Deprecated property names are resolved to their replacements before the map is frozen.
     */
    @Override
    public SiteConfiguration build() {
      // load properties from configuration file
      var propsFileConfig = getPropsFileConfig(url);

      // load properties from command-line overrides
      var overrideConfig = new MapConfiguration(overrides);

      // load credential provider property; the first non-empty provider path wins, checking the
      // properties file before the overrides
      var credProviderProps = new HashMap<String,String>();
      for (var c : new AbstractConfiguration[] {propsFileConfig, overrideConfig}) {
        var credProvider =
            c.getString(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
        if (credProvider != null && !credProvider.isEmpty()) {
          loadCredProviderProps(credProvider, credProviderProps);
          break;
        }
      }

      var credProviderConfig = new MapConfiguration(credProviderProps);

      var config = new CompositeConfiguration();
      // add in specific order; use credential provider first, then overrides, then properties file
      config.addConfiguration(credProviderConfig);
      config.addConfiguration(overrideConfig);
      config.addConfiguration(propsFileConfig);

      var result = new HashMap<String,String>();
      config.getKeys().forEachRemaining(orig -> {
        // NOTE: the lambda's "log" parameter shadows this class's static logger; the warning is
        // emitted through the logger supplied by DeprecatedPropertyUtil
        String resolved = DeprecatedPropertyUtil.getReplacementName(orig, (log, replacement) -> {
          log.warn("{} has been deprecated and will be removed in a future release;"
              + " loading its replacement {} instead.", orig, replacement);
        });
        result.put(resolved, config.getString(orig));
      });
      return new SiteConfiguration(Collections.unmodifiableMap(result));
    }
  }

  /**
   * Build a SiteConfiguration from the environmental configuration with the option to override.
   */
  public static SiteConfiguration.OverridesOption fromEnv() {
    return new SiteConfiguration.Builder().fromEnv();
  }

  /**
   * Build a SiteConfiguration from the provided properties file with the option to override.
   */
  public static SiteConfiguration.OverridesOption fromFile(File propertiesFileLocation) {
    return new SiteConfiguration.Builder().fromFile(propertiesFileLocation);
  }

  /**
   * Build a SiteConfiguration that is initially empty with the option to override.
   */
  public static SiteConfiguration.OverridesOption empty() {
    return new SiteConfiguration.Builder().noFile();
  }

  /**
   * Build a SiteConfiguration from the environmental configuration and no overrides.
   */
  public static SiteConfiguration auto() {
    return new SiteConfiguration.Builder().fromEnv().build();
  }

  // the frozen, already-resolved property map produced by Builder.build()
  private final Map<String,String> config;

  private SiteConfiguration(Map<String,String> config) {
    // fail fast on malformed keys/values before exposing the configuration
    ConfigCheckUtil.validate(config.entrySet(), "site config");
    this.config = config;
  }

  // load properties from config file
  @SuppressFBWarnings(value = "URLCONNECTION_SSRF_FD",
      justification = "url is specified by an admin, not unchecked user input")
  private static AbstractConfiguration getPropsFileConfig(URL accumuloPropsLocation) {
    var config = new PropertiesConfiguration();
    if (accumuloPropsLocation != null) {
      try (var reader = new InputStreamReader(accumuloPropsLocation.openStream(), UTF_8)) {
        config.read(reader);
      } catch (ConfigurationException | IOException e) {
        throw new IllegalArgumentException(e);
      }
    }
    return config;
  }

  // load sensitive properties from Hadoop credential provider
  private static void loadCredProviderProps(String provider, Map<String,String> props) {
    var hadoopConf = new org.apache.hadoop.conf.Configuration();
    HadoopCredentialProvider.setPath(hadoopConf, provider);
    // only properties marked sensitive are looked up in the credential provider
    Stream.of(Property.values()).filter(Property::isSensitive).forEach(p -> {
      char[] value = HadoopCredentialProvider.getValue(hadoopConf, p.getKey());
      if (value != null) {
        props.put(p.getKey(), new String(value));
      }
    });
  }

  /**
   * Returns the configured value, or the parent (default) value when the property is unset or its
   * value does not match the property's declared type format.
   */
  @Override
  public String get(Property property) {
    String value = config.get(property.getKey());
    if (value == null || !property.getType().isValidFormat(value)) {
      if (value != null) {
        log.error("Using default value for {} due to improperly formatted {}: {}",
            property.getKey(), property.getType(), value);
      }
      value = parent.get(property);
    }
    return value;
  }

  @Override
  public boolean isPropertySet(Property prop) {
    return config.containsKey(prop.getKey()) || parent.isPropertySet(prop);
  }

  @Override
  public void getProperties(Map<String,String> props, Predicate<String> filter) {
    getProperties(props, filter, true);
  }

  /**
   * Copies matching properties into {@code props}; when {@code useDefaults} is true, parent
   * defaults are copied first so site values overwrite them.
   */
  public void getProperties(Map<String,String> props, Predicate<String> filter,
      boolean useDefaults) {
    if (useDefaults) {
      parent.getProperties(props, filter);
    }
    config.keySet().forEach(k -> {
      if (filter.test(k)) {
        props.put(k, config.get(k));
      }
    });
  }

  @Override
  public AccumuloConfiguration getParent() {
    return parent;
  }
}
| 9,949 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/DefaultConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static com.google.common.base.Suppliers.memoize;
import java.util.Arrays;
import java.util.Map;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
 * An {@link AccumuloConfiguration} that contains only default values for properties. This class is
 * a singleton.
 */
public class DefaultConfiguration extends AccumuloConfiguration {

  // lazily-created singleton; memoize guarantees at-most-once construction under concurrency
  private static final Supplier<DefaultConfiguration> instance = memoize(DefaultConfiguration::new);

  // Snapshot of every non-prefix property mapped to its default value. Collected into an
  // unmodifiable map so the shared singleton's state cannot be mutated by accident (the original
  // Collectors.toMap produced a mutable HashMap).
  private final Map<String,String> resolvedProps =
      Arrays.stream(Property.values()).filter(p -> p.getType() != PropertyType.PREFIX)
          .collect(Collectors.toUnmodifiableMap(Property::getKey, Property::getDefaultValue));

  private DefaultConfiguration() {}

  /**
   * Gets a default configuration.
   *
   * @return default configuration
   */
  public static DefaultConfiguration getInstance() {
    return instance.get();
  }

  /** Returns the compiled-in default value for the property (null only for PREFIX properties). */
  @Override
  public String get(Property property) {
    return resolvedProps.get(property.getKey());
  }

  /** Copies every default property whose key passes {@code filter} into {@code props}. */
  @Override
  public void getProperties(Map<String,String> props, Predicate<String> filter) {
    resolvedProps.entrySet().stream().filter(p -> filter.test(p.getKey()))
        .forEach(e -> props.put(e.getKey(), e.getValue()));
  }

  /** Always false: defaults are never considered explicitly "set". */
  @Override
  public boolean isPropertySet(Property prop) {
    return false;
  }
}
| 9,950 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationDocGen.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.IOException;
import java.io.PrintStream;
import java.util.TreeMap;
/**
* This class generates documentation to inform users of the available configuration properties in a
* presentable form.
*/
public class ConfigurationDocGen {

  // destination for the generated markdown; closed by generate()
  private PrintStream doc;

  // properties keyed and sorted by property key so the generated table order is deterministic
  private final TreeMap<String,Property> sortedProps = new TreeMap<>();

  /** Writes the whole page: header, the property table, then the property-type table. */
  void generate() {
    pageHeader();

    beginTable("Property");
    for (Property prop : sortedProps.values()) {
      if (prop.getType() == PropertyType.PREFIX) {
        prefixSection(prop);
      } else {
        property(prop);
      }
    }

    beginSection("Property Types");
    beginTable("Type");
    propertyTypeDescriptions();

    doc.close();
  }

  // emits a markdown H3 heading
  void beginSection(String section) {
    doc.println("\n### " + section + "\n");
  }

  // emits a two-column markdown table header
  void beginTable(String name) {
    doc.println("| " + name + " | Description |");
    doc.println("|--------------|-------------|");
  }

  // emits the Jekyll front matter and the page intro text
  void pageHeader() {
    doc.println("---");
    doc.println("title: Server Properties");
    doc.println("category: configuration");
    doc.println("order: 4");
    doc.println("---\n");
    doc.println("<!-- WARNING: Do not edit this file. It is a generated file"
        + " that is copied from Accumulo build (from core/target/generated-docs) -->\n");
    doc.println("Below are properties set in `accumulo.properties` or the"
        + " Accumulo shell that configure Accumulo servers (i.e. tablet server,"
        + " manager, etc). Properties labeled 'Experimental' should not be considered stable"
        + " and have a higher risk of changing in the future.\n");
  }

  // emits a table row for a PREFIX property; deprecated entries are struck through
  void prefixSection(Property prefix) {
    boolean depr = prefix.isDeprecated();
    String key = strike("<a name=\"" + prefix.getKey().replace(".", "_")
        + "prefix\" class=\"prop\"></a> **" + prefix.getKey() + "***", depr);
    String description = prefix.isExperimental() ? "**Experimental**<br>" : "";
    description += "**Available since:** " + prefix.availableSince() + "<br>";
    if (depr) {
      description += "*Deprecated since:* " + prefix.deprecatedSince() + "<br>";
      if (prefix.isReplaced()) {
        description += "*Replaced by:* <a href=\"#" + prefix.replacedBy().getKey().replace(".", "_")
            + "prefix\">" + prefix.replacedBy() + "</a><br>";
      }
    }
    description += strike(sanitize(prefix.getDescription()), depr);
    doc.println("| " + key + " | " + description + " |");
  }

  // emits a table row for a regular (non-PREFIX) property
  void property(Property prop) {
    boolean depr = prop.isDeprecated();
    String key = strike(
        "<a name=\"" + prop.getKey().replace(".", "_") + "\" class=\"prop\"></a> " + prop.getKey(),
        depr);
    String description = prop.isExperimental() ? "**Experimental**<br>" : "";
    description += "**Available since:** ";
    // manager.* properties were renamed from master.* in 2.1.0; show both the new availability
    // and the original name/version for properties that predate the rename
    if (prop.getKey().startsWith("manager.")
        && (prop.availableSince().startsWith("1.") || prop.availableSince().startsWith("2.0"))) {
      description += "2.1.0 (formerly *master." + prop.getKey().substring(8) + "* since "
          + prop.availableSince() + ")<br>";
    } else {
      description += prop.availableSince() + "<br>";
    }
    if (depr) {
      description += "*Deprecated since:* " + prop.deprecatedSince() + "<br>";
      if (prop.isReplaced()) {
        description += "*Replaced by:* <a href=\"#" + prop.replacedBy().getKey().replace(".", "_")
            + "\">" + prop.replacedBy() + "</a><br>";
      }
    }
    description += strike(sanitize(prop.getDescription()), depr) + "<br>"
        + strike("**type:** " + prop.getType().name(), depr) + ", "
        + strike("**zk mutable:** " + isZooKeeperMutable(prop), depr) + ", ";
    String defaultValue = sanitize(prop.getDefaultValue()).trim();
    if (defaultValue.isEmpty()) {
      description += strike("**default value:** empty", depr);
    } else if (defaultValue.contains("\n")) {
      // deal with multi-line values, skip strikethrough of value
      description += strike("**default value:** ", depr) + "\n```\n" + defaultValue + "\n```\n";
    } else if (prop.getType() == PropertyType.CLASSNAME
        && defaultValue.startsWith("org.apache.accumulo")) {
      // link Accumulo class names to their javadoc via the jlink Jekyll tag
      description += strike("**default value:** {% jlink -f " + defaultValue + " %}", depr);
    } else {
      description += strike("**default value:** `" + defaultValue + "`", depr);
    }
    doc.println("| " + key + " | " + description + " |");
  }

  // wraps the text in markdown strikethrough when the property is deprecated
  private String strike(String s, boolean isDeprecated) {
    return (isDeprecated ? "~~" : "") + s + (isDeprecated ? "~~" : "");
  }

  // emits one table row per property type, skipping the internal PREFIX type
  void propertyTypeDescriptions() {
    for (PropertyType type : PropertyType.values()) {
      if (type == PropertyType.PREFIX) {
        continue;
      }
      doc.println(
          "| " + sanitize(type.toString()) + " | " + sanitize(type.getFormatDescription()) + " |");
    }
  }

  // newlines would break markdown table cells; render them as <br>
  String sanitize(String str) {
    return str.replace("\n", "<br>");
  }

  private ConfigurationDocGen(PrintStream doc) {
    this.doc = doc;
    for (Property prop : Property.values()) {
      this.sortedProps.put(prop.getKey(), prop);
    }
  }

  // describes whether/how the property can be changed via ZooKeeper at runtime
  private String isZooKeeperMutable(Property prop) {
    if (!Property.isValidZooPropertyKey(prop.getKey())) {
      return "no";
    }
    if (Property.isFixedZooPropertyKey(prop)) {
      // the first segment of the key names the server process that must be restarted
      return "yes but requires restart of the " + prop.getKey().split("[.]")[0];
    }
    return "yes";
  }

  /**
   * Generates documentation for accumulo.properties file usage. Arguments are: "--generate-markdown
   * filename"
   *
   * @param args command-line arguments
   * @throws IllegalArgumentException if args is invalid
   */
  public static void main(String[] args) throws IOException {
    if (args.length == 2 && args[0].equals("--generate-markdown")) {
      try (var printStream = new PrintStream(args[1], UTF_8)) {
        new ConfigurationDocGen(printStream).generate();
      }
    } else {
      throw new IllegalArgumentException(
          "Usage: " + ConfigurationDocGen.class.getName() + " --generate-markdown <filename>");
    }
  }
}
| 9,951 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/Experimental.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
 * An annotation to denote experimental {@link AccumuloConfiguration} {@link Property} keys.
 * Experimental is a feature that is considered a work in progress or incomplete and could change.
 */
@Inherited
// runtime retention so documentation generators can discover the marker via reflection
@Retention(RetentionPolicy.RUNTIME)
@interface Experimental {

}
| 9,952 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ClientConfigGenerate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import com.google.common.collect.Sets;
/**
* Generates client-properties.md for documentation on Accumulo website and
* accumulo-client.properties for Accumulo distribution tarball
*/
public class ClientConfigGenerate {

  /**
   * Template for an output format: subclasses supply the page header, section delimiters, and
   * per-property rendering; {@link #generate()} drives the fixed section ordering.
   */
  private abstract class Format {

    abstract void beginSection(String section);

    abstract void pageHeader();

    abstract void property(ClientProperty prop);

    void generate() {
      pageHeader();

      generateSection("Instance", "instance.");
      // auth.type and auth.principal are listed first, ahead of other auth.* properties
      generateSection("Authentication", "auth.", "auth.type", "auth.principal");
      generateSection("Batch Writer", "batch.writer.");
      generateSection("Batch Scanner", "batch.scanner.");
      generateSection("Scanner", "scanner.");
      generateSection("SSL", "ssl.");
      generateSection("SASL", "sasl.");
      generateSection("Tracing", "trace.");

      doc.close();
    }

    /**
     * Emits one section: first the explicitly ordered {@code prefixProps}, then every remaining
     * property whose key starts with {@code prefix}, in sorted-key order.
     */
    void generateSection(String section, String prefix, String... prefixProps) {
      beginSection(section);
      for (String prop : prefixProps) {
        ClientProperty cp = sortedProps.get(prop);
        if (cp != null) {
          property(cp);
        }
      }
      Set<String> prefixSet = Sets.newHashSet(prefixProps);
      for (ClientProperty prop : sortedProps.values()) {
        if (prop.getKey().startsWith(prefix) && !prefixSet.contains(prop.getKey())) {
          property(prop);
        }
      }
    }

    // convenience overload; "" is a placeholder name that matches no property key
    void generateSection(String section, String prefix) {
      generateSection(section, prefix, "");
    }
  }

  /** Renders client-properties.md for the Accumulo website (a single markdown table). */
  private class Markdown extends Format {

    // markdown output has no per-section headings; everything goes in one table
    @Override
    void beginSection(String section) {}

    @Override
    void pageHeader() {
      doc.println("---");
      doc.println("title: Client Properties");
      doc.println("category: configuration");
      doc.println("order: 3");
      doc.println("---\n");
      doc.println("<!-- WARNING: Do not edit this file. It is a generated file"
          + " that is copied from Accumulo build (from core/target/generated-docs) -->");
      doc.println("<!-- Generated by : " + getClass().getName() + " -->\n");
      doc.println("Below are properties set in `accumulo-client.properties`"
          + " that configure [Accumulo clients]({{ page.docs_baseurl"
          + " }}/getting-started/clients#connecting). All properties have been part"
          + " of the API since 2.0.0 (unless otherwise specified):\n");
      doc.println("| Property | Default value | Since | Description |");
      doc.println("|----------|---------------|-------|-------------|");
    }

    @Override
    void property(ClientProperty prop) {
      Objects.requireNonNull(prop);
      doc.print("| <a name=\"" + prop.getKey().replace(".", "_") + "\" class=\"prop\"></a> "
          + prop.getKey() + " | ");
      String defaultValue = sanitize(prop.getDefaultValue()).trim();
      if (defaultValue.isEmpty()) {
        defaultValue = "*empty*";
      }
      doc.println(
          defaultValue + " | " + prop.getSince() + " | " + sanitize(prop.getDescription() + " |"));
    }

    // newlines would break markdown table cells; render them as <br>
    String sanitize(String str) {
      return str.replace("\n", "<br>");
    }
  }

  /** Renders accumulo-client.properties for the distribution tarball. */
  private class ConfigFile extends Format {

    @Override
    void beginSection(String section) {
      doc.println("\n## " + section + " properties");
      doc.println("## --------------");
    }

    @Override
    void pageHeader() {
      doc.println("# Licensed to the Apache Software Foundation (ASF) under one or more");
      doc.println("# contributor license agreements.  See the NOTICE file distributed with");
      doc.println("# this work for additional information regarding copyright ownership.");
      doc.println("# The ASF licenses this file to You under the Apache License, Version 2.0");
      doc.println("# (the \"License\"); you may not use this file except in compliance with");
      doc.println("# the License.  You may obtain a copy of the License at");
      doc.println("#");
      doc.println("# https://www.apache.org/licenses/LICENSE-2.0");
      doc.println("#");
      doc.println("# Unless required by applicable law or agreed to in writing, software");
      doc.println("# distributed under the License is distributed on an \"AS IS\" BASIS,");
      doc.println("# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.");
      doc.println("# See the License for the specific language governing permissions and");
      doc.println("# limitations under the License.\n");
      doc.println("################################");
      doc.println("## Accumulo client configuration");
      doc.println("################################\n");
      doc.println("## NOTE - All properties that have a default are set with it. Properties that");
      doc.println("## are uncommented must be set by the user.");
    }

    @Override
    void property(ClientProperty prop) {
      doc.println("## " + prop.getDescription());
      // optional properties are emitted commented-out so only required ones force a value
      if (!prop.isRequired()) {
        doc.print("#");
      }
      doc.println(prop.getKey() + "=" + prop.getDefaultValue() + "\n");
    }
  }

  // destination stream; closed by Format.generate()
  private PrintStream doc;

  // properties keyed and sorted by key so output order is deterministic
  private final TreeMap<String,ClientProperty> sortedProps = new TreeMap<>();

  private ClientConfigGenerate(PrintStream doc) {
    Objects.requireNonNull(doc);
    this.doc = doc;
    for (ClientProperty prop : ClientProperty.values()) {
      this.sortedProps.put(prop.getKey(), prop);
    }
  }

  private void generateMarkdown() {
    new Markdown().generate();
  }

  private void generateConfigFile() {
    new ConfigFile().generate();
  }

  /**
   * Generates markdown and config files for Accumulo client properties. Arguments are:
   * "--generate-markdown filename" or "--generate-config filename"
   *
   * @param args command-line arguments
   * @throws IllegalArgumentException if args is invalid
   */
  public static void main(String[] args) throws IOException {
    if (args.length == 2) {
      try (PrintStream stream = new PrintStream(args[1], UTF_8)) {
        ClientConfigGenerate clientConfigGenerate = new ClientConfigGenerate(stream);
        if (args[0].equals("--generate-markdown")) {
          clientConfigGenerate.generateMarkdown();
          return;
        } else if (args[0].equals("--generate-config")) {
          clientConfigGenerate.generateConfigFile();
          return;
        }
      }
    }
    throw new IllegalArgumentException("Usage: " + ClientConfigGenerate.class.getName()
        + " [--generate-markdown|--generate-config] <filename>");
  }
}
| 9,953 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/Sensitive.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
 * An annotation to denote {@link AccumuloConfiguration} {@link Property} keys which are sensitive,
 * and should be masked or hidden when printed.
 */
@Inherited
// runtime retention so printing/reporting code can detect and mask sensitive values reflectively
@Retention(RetentionPolicy.RUNTIME)
@interface Sensitive {

}
| 9,954 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ReplacedBy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
 * An annotation to denote that an {@link AccumuloConfiguration} {@link Property} key is replaced by
 * a different {@link AccumuloConfiguration} {@link Property} key. This usually means that the
 * {@link AccumuloConfiguration} {@link Property} key is {@link Deprecated}.
 */
@Inherited
// runtime retention so the replacement can be resolved reflectively (e.g. by doc generation)
@Retention(RetentionPolicy.RUNTIME)
@interface ReplacedBy {
  /** The property that replaces the annotated one. */
  Property property();
}
| 9,955 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ClientProperty.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static com.google.common.base.Preconditions.checkState;
import java.io.File;
import java.io.IOException;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import org.apache.accumulo.core.client.admin.TableOperations.ImportMappingOptions;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.CredentialProviderToken;
import org.apache.accumulo.core.client.security.tokens.DelegationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.spi.scan.ConfigurableScanServerSelector;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public enum ClientProperty {
// Instance
INSTANCE_NAME("instance.name", "", PropertyType.STRING, "Name of Accumulo instance to connect to",
"2.0.0", true),
INSTANCE_ZOOKEEPERS("instance.zookeepers", "localhost:2181", PropertyType.HOSTLIST,
"Zookeeper connection information for Accumulo instance", "2.0.0", true),
INSTANCE_ZOOKEEPERS_TIMEOUT("instance.zookeepers.timeout", "30s", PropertyType.TIMEDURATION,
"Zookeeper session timeout", "2.0.0", false),
// Authentication
AUTH_TYPE("auth.type", "password", PropertyType.STRING,
"Authentication method (i.e password, kerberos, PasswordToken, KerberosToken, etc)", "2.0.0",
true),
AUTH_PRINCIPAL("auth.principal", "", PropertyType.STRING,
"Accumulo principal/username for chosen authentication method", "2.0.0", true),
AUTH_TOKEN("auth.token", "", PropertyType.STRING,
"Authentication token (ex. mypassword, /path/to/keytab)", "2.0.0", true),
// BatchWriter
BATCH_WRITER_MEMORY_MAX("batch.writer.memory.max", "50M", PropertyType.BYTES,
"Max memory (in bytes) to batch before writing", "2.0.0", false),
BATCH_WRITER_LATENCY_MAX("batch.writer.latency.max", "120s", PropertyType.TIMEDURATION,
"Max amount of time (in seconds) to hold data in memory before flushing it", "2.0.0", false),
BATCH_WRITER_TIMEOUT_MAX("batch.writer.timeout.max", "0", PropertyType.TIMEDURATION,
"Max amount of time (in seconds) an unresponsive server will be re-tried. An"
+ " exception is thrown when this timeout is exceeded. Set to zero for no timeout.",
"2.0.0", false),
BATCH_WRITER_THREADS_MAX("batch.writer.threads.max", "3", PropertyType.COUNT,
"Maximum number of threads to use for writing data to tablet servers.", "2.0.0", false),
BATCH_WRITER_DURABILITY("batch.writer.durability", "default", PropertyType.DURABILITY,
Property.TABLE_DURABILITY.getDescription() + " Setting this property will "
+ "change the durability for the BatchWriter session. A value of \"default\" will"
+ " use the table's durability setting. ",
"2.0.0", false),
// ConditionalWriter
CONDITIONAL_WRITER_TIMEOUT_MAX("conditional.writer.timeout.max", "0", PropertyType.TIMEDURATION,
"Maximum amount of time an unresponsive server will be re-tried. A value of 0 will use "
+ "Long.MAX_VALUE.",
"2.1.0", false),
CONDITIONAL_WRITER_THREADS_MAX("conditional.writer.threads.max", "3", PropertyType.COUNT,
"Maximum number of threads to use for writing data to tablet servers.", "2.1.0", false),
CONDITIONAL_WRITER_DURABILITY("conditional.writer.durability", "default", PropertyType.DURABILITY,
Property.TABLE_DURABILITY.getDescription() + " Setting this property will change the "
+ "durability for the ConditionalWriter session. A value of \"default\" will use the"
+ " table's durability setting. ",
"2.1.0", false),
// Scanner
SCANNER_BATCH_SIZE("scanner.batch.size", "1000", PropertyType.COUNT,
"Number of key/value pairs that will be fetched at time from tablet server", "2.0.0", false),
SCAN_SERVER_SELECTOR("scan.server.selector.impl", ConfigurableScanServerSelector.class.getName(),
PropertyType.CLASSNAME, "Class used by client to find Scan Servers", "2.1.0", false),
SCAN_SERVER_SELECTOR_OPTS_PREFIX("scan.server.selector.opts.", "", PropertyType.PREFIX,
"Properties in this category are related to the configuration of the scan.server.selector.impl class",
"2.1.0", false),
// BatchScanner
BATCH_SCANNER_NUM_QUERY_THREADS("batch.scanner.num.query.threads", "3", PropertyType.COUNT,
"Number of concurrent query threads to spawn for querying", "2.0.0", false),
// Bulk load
BULK_LOAD_THREADS("bulk.threads", ImportMappingOptions.BULK_LOAD_THREADS_DEFAULT,
PropertyType.COUNT,
"The number of threads used to inspect bulk load files to determine where files go. "
+ "If the value ends with C, then it will be multiplied by the number of cores on the "
+ "system. This property is only used by the bulk import API introduced in 2.0.0.",
"2.0.0", false),
// SSL
SSL_ENABLED("ssl.enabled", "false", "Enable SSL for client RPC"),
SSL_KEYSTORE_PASSWORD("ssl.keystore.password", "", "Password used to encrypt keystore"),
SSL_KEYSTORE_PATH("ssl.keystore.path", "", PropertyType.PATH, "Path to SSL keystore file",
"2.0.0", false),
SSL_KEYSTORE_TYPE("ssl.keystore.type", "jks", "Type of SSL keystore"),
SSL_TRUSTSTORE_PASSWORD("ssl.truststore.password", "", "Password used to encrypt truststore"),
SSL_TRUSTSTORE_PATH("ssl.truststore.path", "", PropertyType.PATH, "Path to SSL truststore file",
"2.0.0", false),
SSL_TRUSTSTORE_TYPE("ssl.truststore.type", "jks", "Type of SSL truststore"),
SSL_USE_JSSE("ssl.use.jsse", "false", "Use JSSE system properties to configure SSL"),
// SASL
SASL_ENABLED("sasl.enabled", "false", "Enable SASL for client RPC"),
SASL_QOP("sasl.qop", "auth",
"SASL quality of protection. Valid values are 'auth', 'auth-int', and 'auth-conf'"),
SASL_KERBEROS_SERVER_PRIMARY("sasl.kerberos.server.primary", "accumulo",
"Kerberos principal/primary that Accumulo servers use to login"),
// RPC
RPC_TRANSPORT_IDLE_TIMEOUT("rpc.transport.idle.timeout", "3s", PropertyType.TIMEDURATION,
"The maximum duration to leave idle transports open in the client's transport pool", "2.1.0",
false),
;
// Property key as it appears in a client configuration file (e.g. "auth.type").
private final String key;
// Default used when the property is unset or set to the empty string.
private final String defaultValue;
// Format/validation type applied to values of this property.
private final PropertyType type;
// Human-readable description, used for generated documentation.
private final String description;
// Accumulo version in which the property was introduced; "" when unspecified.
private final String since;
// Whether a non-empty value must be present (enforced by getValue/validateProperty).
private final boolean required;
/**
 * Creates a client property definition.
 *
 * @param key property key, not null
 * @param defaultValue default value, not null (may be empty)
 * @param type value format type, not null
 * @param description human-readable description, not null
 * @param since version in which the property was added, not null ("" if unknown)
 * @param required true if a non-empty value is mandatory
 */
ClientProperty(String key, String defaultValue, PropertyType type, String description,
    String since, boolean required) {
  this.key = Objects.requireNonNull(key);
  this.defaultValue = Objects.requireNonNull(defaultValue);
  this.type = Objects.requireNonNull(type);
  this.description = Objects.requireNonNull(description);
  this.since = Objects.requireNonNull(since);
  this.required = required;
}
/**
 * Convenience constructor for optional STRING-typed properties with no "since" version.
 */
ClientProperty(String key, String defaultValue, String description) {
  this(key, defaultValue, PropertyType.STRING, description, "", false);
}
/**
 * @return the property key, e.g. "instance.name"
 */
public String getKey() {
  return key;
}
/**
 * @return the default value used when the property is unset or empty
 */
public String getDefaultValue() {
  return defaultValue;
}
/**
 * @return the {@link PropertyType} used to validate values of this property
 */
public PropertyType getType() {
  return type;
}
/**
 * @return the human-readable description of this property
 */
public String getDescription() {
  return description;
}
/**
 * @return the version in which this property was introduced, or "" if unspecified
 */
public String getSince() {
  return since;
}
/**
 * @return true if a non-empty value must be supplied for this property
 */
public boolean isRequired() {
  return required;
}
/**
 * Resolves this property's value from the given properties, falling back to the default when the
 * property is unset or empty, then validates the result.
 *
 * @param properties client properties to read from, not null
 * @return the resolved value (possibly the default, possibly empty for optional properties)
 * @throws IllegalArgumentException if a required value is missing, or the value does not match
 *         this property's {@link PropertyType} format
 */
public String getValue(Properties properties) {
  Objects.requireNonNull(properties);
  String configured = properties.getProperty(getKey());
  String resolved =
      (configured == null || configured.isEmpty()) ? getDefaultValue() : configured;
  Objects.requireNonNull(resolved);
  if (resolved.isEmpty() && isRequired()) {
    throw new IllegalArgumentException(getKey() + " must be set!");
  }
  if (!type.isValidFormat(resolved)) {
    throw new IllegalArgumentException(
        "Invalid format for type \"" + type + "\" for provided value: " + resolved);
  }
  return resolved;
}
/**
 * @return true if this property is unset or set to the empty string (the default is NOT consulted)
 */
public boolean isEmpty(Properties properties) {
  Objects.requireNonNull(properties);
  String configured = properties.getProperty(getKey());
  return configured == null || configured.isEmpty();
}
/**
 * Resolves this BYTES-typed property and converts it to a byte count.
 *
 * @return the value in bytes, or null if the resolved value is empty
 * @throws IllegalStateException if this property is not of type {@link PropertyType#BYTES}
 */
public Long getBytes(Properties properties) {
  String value = getValue(properties);
  if (value.isEmpty()) {
    return null;
  }
  checkState(getType() == PropertyType.BYTES,
      "Invalid type getting bytes. Type must be " + PropertyType.BYTES + ", not " + getType());
  return ConfigurationTypeHelper.getMemoryAsBytes(value);
}
/**
 * Resolves this TIMEDURATION-typed property and converts it to milliseconds.
 *
 * @return the duration in milliseconds, or null if the resolved value is empty
 * @throws IllegalStateException if this property is not of type {@link PropertyType#TIMEDURATION}
 */
public Long getTimeInMillis(Properties properties) {
  String value = getValue(properties);
  if (value.isEmpty()) {
    return null;
  }
  checkState(getType() == PropertyType.TIMEDURATION, "Invalid type getting time. Type must be "
      + PropertyType.TIMEDURATION + ", not " + getType());
  return ConfigurationTypeHelper.getTimeInMillis(value);
}
/**
 * Resolves this property as an Integer.
 *
 * @return the parsed integer, or null if the resolved value is empty
 * @throws NumberFormatException if the value is non-empty but not a valid integer
 */
public Integer getInteger(Properties properties) {
  String value = getValue(properties);
  if (value.isEmpty()) {
    return null;
  }
  return Integer.parseInt(value);
}
/**
 * Resolves this property as a boolean.
 *
 * @return true only if the resolved value equals "true" (case-insensitive); false for an empty
 *         value or any other string
 */
public boolean getBoolean(Properties properties) {
  String value = getValue(properties);
  if (value.isEmpty()) {
    return false;
  }
  return Boolean.parseBoolean(value);
}
/**
 * Sets this BYTES-typed property to the given byte count.
 *
 * @param bytes number of bytes, not null
 * @throws IllegalStateException if this property is not of type {@link PropertyType#BYTES}
 */
public void setBytes(Properties properties, Long bytes) {
  checkState(getType() == PropertyType.BYTES,
      "Invalid type setting bytes. Type must be " + PropertyType.BYTES + ", not " + getType());
  properties.setProperty(getKey(), bytes.toString());
}
/**
 * Sets this TIMEDURATION-typed property to the given number of milliseconds (stored with an "ms"
 * suffix).
 *
 * @throws IllegalStateException if this property is not of type {@link PropertyType#TIMEDURATION}
 */
public void setTimeInMillis(Properties properties, Long milliseconds) {
  checkState(getType() == PropertyType.TIMEDURATION, "Invalid type setting "
      + "time. Type must be " + PropertyType.TIMEDURATION + ", not " + getType());
  properties.setProperty(getKey(), milliseconds + "ms");
}
/**
 * Returns a new Properties containing only the entries of {@code properties} whose keys start
 * with {@code prefix}. Only explicitly-set entries are considered (defaults of the given
 * Properties are not consulted).
 */
public static Properties getPrefix(Properties properties, String prefix) {
  Properties matched = new Properties();
  for (Object keyObject : properties.keySet()) {
    String name = (String) keyObject;
    if (name.startsWith(prefix)) {
      matched.put(name, properties.getProperty(name));
    }
  }
  return matched;
}
/**
 * Copies the explicitly-set entries of {@code properties} into a new mutable Map (defaults of the
 * given Properties are not consulted).
 */
public static Map<String,String> toMap(Properties properties) {
  Map<String,String> result = new HashMap<>();
  for (Object keyObject : properties.keySet()) {
    String name = (String) keyObject;
    result.put(name, properties.getProperty(name));
  }
  return result;
}
/**
 * Serializes an authentication token and Base64-encodes it for storage in a properties value.
 * Inverse of {@link #decodeToken(String, String)}.
 */
public static String encodeToken(AuthenticationToken token) {
  return Base64.getEncoder()
      .encodeToString(AuthenticationToken.AuthenticationTokenSerializer.serialize(token));
}
/**
 * Reconstructs an authentication token from its class name and a Base64-encoded serialized form,
 * as produced by {@link #encodeToken(AuthenticationToken)}.
 */
public static AuthenticationToken decodeToken(String className, String tokenString) {
  return AuthenticationToken.AuthenticationTokenSerializer.deserialize(className,
      Base64.getDecoder().decode(tokenString));
}
/**
 * Configures password authentication: sets auth.type to "password" and stores the password as the
 * auth token (in the clear, as a plain property value).
 */
public static void setPassword(Properties properties, CharSequence password) {
  properties.setProperty(ClientProperty.AUTH_TYPE.getKey(), "password");
  properties.setProperty(ClientProperty.AUTH_TOKEN.getKey(), password.toString());
}
/**
 * Configures Kerberos authentication: sets auth.type to "kerberos" and stores the keytab file
 * path as the auth token.
 */
public static void setKerberosKeytab(Properties properties, String keytabPath) {
  properties.setProperty(ClientProperty.AUTH_TYPE.getKey(), "kerberos");
  properties.setProperty(ClientProperty.AUTH_TOKEN.getKey(), keytabPath);
}
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
    justification = "code runs in same security context as user who providing the token file")
/**
 * Builds an {@link AuthenticationToken} from the auth.type and auth.token properties.
 *
 * <p>
 * The interpretation of auth.token depends on auth.type: for "password" it is the literal
 * password, for "kerberos" it is a keytab file path, for a recognized token class simple name
 * (e.g. "PasswordToken") or, in the default case, a fully qualified class name, it is the
 * Base64-encoded serialized token as produced by {@link #encodeToken(AuthenticationToken)}.
 *
 * @throws IllegalArgumentException if the kerberos keytab cannot be read
 */
public static AuthenticationToken getAuthenticationToken(Properties properties) {
  String authType = ClientProperty.AUTH_TYPE.getValue(properties);
  String token = ClientProperty.AUTH_TOKEN.getValue(properties);
  switch (authType) {
    case "password":
      // auth.token is the plain password
      return new PasswordToken(token);
    case "PasswordToken":
      return decodeToken(PasswordToken.class.getName(), token);
    case "kerberos":
      try {
        // auth.token is a keytab path; principal comes from auth.principal
        String principal = ClientProperty.AUTH_PRINCIPAL.getValue(properties);
        return new KerberosToken(principal, new File(token));
      } catch (IOException e) {
        throw new IllegalArgumentException(e);
      }
    case "KerberosToken":
      return decodeToken(KerberosToken.class.getName(), token);
    case "CredentialProviderToken":
      return decodeToken(CredentialProviderToken.class.getName(), token);
    case "DelegationToken":
      return decodeToken(DelegationToken.class.getName(), token);
    default:
      // assume auth.type is a fully qualified token class name
      return decodeToken(authType, token);
  }
}
/**
 * Stores the given token in the properties: auth.type becomes the token's fully qualified class
 * name and auth.token its Base64-encoded serialized form. Round-trips through
 * {@link #getAuthenticationToken(Properties)}.
 */
public static void setAuthenticationToken(Properties properties, AuthenticationToken token) {
  properties.setProperty(ClientProperty.AUTH_TYPE.getKey(), token.getClass().getName());
  properties.setProperty(ClientProperty.AUTH_TOKEN.getKey(), encodeToken(token));
}
/**
 * Ensures the given property is explicitly present and resolves to a non-empty value.
 *
 * @throws IllegalArgumentException if the property is absent or its resolved value is empty
 */
public static void validateProperty(Properties properties, ClientProperty prop) {
  boolean present = properties.containsKey(prop.getKey());
  if (!present || prop.getValue(properties).isEmpty()) {
    throw new IllegalArgumentException(prop.getKey() + " is not set");
  }
}
/**
 * Validates that the minimum set of client properties is present: instance name, zookeepers,
 * auth type and principal, and optionally the auth token.
 *
 * @param validateToken if true, also require auth.token to be set
 * @throws IllegalArgumentException if any required property is missing or empty
 */
public static void validate(Properties properties, boolean validateToken) {
  validateProperty(properties, ClientProperty.INSTANCE_NAME);
  validateProperty(properties, ClientProperty.INSTANCE_ZOOKEEPERS);
  validateProperty(properties, ClientProperty.AUTH_TYPE);
  validateProperty(properties, ClientProperty.AUTH_PRINCIPAL);
  if (validateToken) {
    validateProperty(properties, ClientProperty.AUTH_TOKEN);
  }
}
/**
 * Validates that all required client properties, including the auth token, are present.
 *
 * @throws IllegalArgumentException if Properties does not contain all required client properties
 * @throws NullPointerException if {@code properties == null}
 */
public static void validate(Properties properties) {
  validate(properties, true);
}
}
| 9,956 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/PropertyType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.apache.accumulo.core.file.rfile.RFile;
import org.apache.commons.lang3.Range;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
/**
 * Types of {@link Property} values. Each type has a short name, a description, and a regex which
 * valid values match. All of these fields are optional.
 */
public enum PropertyType {
  // Marker type for prefix properties; a prefix itself never holds a value, so nothing validates.
  PREFIX(null, x -> false, null),

  // NOTE(review): the description below omits the 'd' (days) unit even though 'd' is accepted
  // and '3d' is listed among the valid examples — the description string likely needs updating.
  TIMEDURATION("duration", boundedUnits(0, Long.MAX_VALUE, true, "", "ms", "s", "m", "h", "d"),
      "A non-negative integer optionally followed by a unit of time (whitespace"
          + " disallowed), as in 30s.\n"
          + "If no unit of time is specified, seconds are assumed. Valid units"
          + " are 'ms', 's', 'm', 'h' for milliseconds, seconds, minutes, and hours.\n"
          + "Examples of valid durations are '600', '30s', '45m', '30000ms', '3d', and '1h'.\n"
          + "Examples of invalid durations are '1w', '1h30m', '1s 200ms', 'ms', '',"
          + " and 'a'.\nUnless otherwise stated, the max value for the duration"
          + " represented in milliseconds is " + Long.MAX_VALUE),

  // NOTE(review): '%' is not an accepted suffix for BYTES (only for MEMORY), yet the description
  // lists '20%' as a valid example — appears copied from MEMORY; confirm and fix the text.
  BYTES("bytes", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G"),
      "A positive integer optionally followed by a unit of memory (whitespace disallowed).\n"
          + "If no unit is specified, bytes are assumed. Valid units are 'B',"
          + " 'K', 'M' or 'G' for bytes, kilobytes, megabytes, gigabytes.\n"
          + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G', '20%'.\n"
          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G',"
          + " '1,024K', '', and 'a'.\n"
          + "Unless otherwise stated, the max value for the memory represented in bytes is "
          + Long.MAX_VALUE),

  MEMORY("memory", boundedUnits(0, Long.MAX_VALUE, false, "", "B", "K", "M", "G", "%"),
      "A positive integer optionally followed by a unit of memory or a"
          + " percentage (whitespace disallowed).\n"
          + "If a percentage is specified, memory will be a percentage of the"
          + " max memory allocated to a Java process (set by the JVM option -Xmx).\n"
          + "If no unit is specified, bytes are assumed. Valid units are 'B',"
          + " 'K', 'M', 'G', '%' for bytes, kilobytes, megabytes, gigabytes, and percentage.\n"
          + "Examples of valid memories are '1024', '20B', '100K', '1500M', '2G', '20%'.\n"
          + "Examples of invalid memories are '1M500K', '1M 2K', '1MB', '1.5G',"
          + " '1,024K', '', and 'a'.\n"
          + "Unless otherwise stated, the max value for the memory represented in bytes is "
          + Long.MAX_VALUE),

  HOSTLIST("host list",
      new Matches(
          "[\\w-]+(?:\\.[\\w-]+)*(?:\\:\\d{1,5})?(?:,[\\w-]+(?:\\.[\\w-]+)*(?:\\:\\d{1,5})?)*"),
      "A comma-separated list of hostnames or ip addresses, with optional port numbers.\n"
          + "Examples of valid host lists are"
          + " 'localhost:2000,www.example.com,10.10.1.1:500' and 'localhost'.\n"
          + "Examples of invalid host lists are '', ':1000', and 'localhost:80000'"),

  // A value is valid if ANY of the three predicates accepts it: a single port in 1024-65535,
  // the literal "0" (ephemeral), or an M-N range.
  PORT("port",
      x -> Stream.of(new Bounds(1024, 65535), in(true, "0"), new PortRange("\\d{4,5}-\\d{4,5}"))
          .anyMatch(y -> y.test(x)),
      "An positive integer in the range 1024-65535 (not already in use or"
          + " specified elsewhere in the configuration),\n"
          + "zero to indicate any open ephemeral port, or a range of positive"
          + " integers specified as M-N"),

  COUNT("count", new Bounds(0, Integer.MAX_VALUE),
      "A non-negative integer in the range of 0-" + Integer.MAX_VALUE),

  FRACTION("fraction/percentage", new FractionPredicate(),
      "A floating point number that represents either a fraction or, if"
          + " suffixed with the '%' character, a percentage.\n"
          + "Examples of valid fractions/percentages are '10', '1000%', '0.05',"
          + " '5%', '0.2%', '0.0005'.\n"
          + "Examples of invalid fractions/percentages are '', '10 percent', 'Hulk Hogan'"),

  // PATH intentionally accepts any string; substitution/validation happens elsewhere.
  PATH("path", x -> true,
      "A string that represents a filesystem path, which can be either relative"
          + " or absolute to some directory. The filesystem depends on the property. "
          + "Substitutions of the ACCUMULO_HOME environment variable can be done in the system "
          + "config file using '${env:ACCUMULO_HOME}' or similar."),

  ABSOLUTEPATH("absolute path",
      x -> x == null || x.trim().isEmpty() || new Path(x.trim()).isAbsolute(),
      "An absolute filesystem path. The filesystem depends on the property."
          + " This is the same as path, but enforces that its root is explicitly specified."),

  CLASSNAME("java class", new Matches("[\\w$.]*"),
      "A fully qualified java class name representing a class on the classpath.\n"
          + "An example is 'java.lang.String', rather than 'String'"),

  CLASSNAMELIST("java class list", new Matches("[\\w$.,]*"),
      "A list of fully qualified java class names representing classes on the classpath.\n"
          + "An example is 'java.lang.String', rather than 'String'"),

  DURABILITY("durability", in(false, null, "default", "none", "log", "flush", "sync"),
      "One of 'none', 'log', 'flush' or 'sync'."),

  GC_POST_ACTION("gc_post_action", in(true, null, "none", "flush", "compact"),
      "One of 'none', 'flush', or 'compact'."),

  LAST_LOCATION_MODE("last_location_mode", in(true, null, "assignment", "compaction"),
      "Defines how to update the last location. One of 'assignment', or 'compaction'."),

  STRING("string", x -> true,
      "An arbitrary string of characters whose format is unspecified and"
          + " interpreted based on the context of the property to which it applies."),

  JSON("json", new ValidJson(),
      "An arbitrary string that is represents a valid, parsable generic json object."
          + "The validity of the json object in the context of the property usage is not checked by this type."),

  BOOLEAN("boolean", in(false, null, "true", "false"),
      "Has a value of either 'true' or 'false' (case-insensitive)"),

  // URI is not syntactically validated; any string is accepted.
  URI("uri", x -> true, "A valid URI"),

  FILENAME_EXT("file name extension", in(true, RFile.EXTENSION),
      "One of the currently supported filename extensions for storing table data files. "
          + "Currently, only " + RFile.EXTENSION + " is supported.");

  // Short display name returned by toString(); may be null (e.g. PREFIX).
  private final String shortname;
  // Long-form description of the accepted format, used in generated documentation.
  private final String format;
  // Field is transient because enums are Serializable, but Predicates aren't necessarily,
  // and our lambdas certainly aren't; This shouldn't matter because enum serialization doesn't
  // store fields, so this is a false positive in our spotbugs version
  // see https://github.com/spotbugs/spotbugs/issues/740
  private transient final Predicate<String> predicate;

  PropertyType(String shortname, Predicate<String> predicate, String formatDescription) {
    this.shortname = shortname;
    this.predicate = Objects.requireNonNull(predicate);
    this.format = formatDescription;
  }

  @Override
  public String toString() {
    return shortname;
  }

  /**
   * Gets the description of this type.
   *
   * @return description
   */
  String getFormatDescription() {
    return format;
  }

  /**
   * Checks if the given value is valid for this type.
   *
   * @return true if value is valid or null, or if this type has no regex
   */
  public boolean isValidFormat(String value) {
    // this can't happen because enum fields aren't serialized, so it doesn't matter if the
    // predicate was transient or not, but it's probably not hurting anything to check and provide
    // the helpful error message for troubleshooting, just in case
    Preconditions.checkState(predicate != null,
        "Predicate was null, maybe this enum was serialized????");
    return predicate.test(value);
  }

  /**
   * Validate that the provided string can be parsed into a json object. This implementation uses
   * jackson databind because it is less permissive that GSON for what is considered valid. This
   * implementation cannot guarantee that the json is valid for the target usage. That would require
   * something like a json schema or a check specific to the use-case. This is only trying to
   * provide a generic, minimal check that at least the json is valid.
   */
  private static class ValidJson implements Predicate<String> {
    private static final Logger log = LoggerFactory.getLogger(ValidJson.class);
    // ObjectMapper is thread-safe, but uses synchronization. If this causes contention, ThreadLocal
    // may be an option.
    private final ObjectMapper jsonMapper =
        new ObjectMapper().enable(DeserializationFeature.FAIL_ON_READING_DUP_TREE_KEY)
            .enable(DeserializationFeature.FAIL_ON_TRAILING_TOKENS);
    // set a limit of 1 million characters on the string as rough guard on invalid input
    // NOTE(review): 1024 * 1024 is 1,048,576, slightly more than one million — the name and
    // comment are approximate.
    private static final int ONE_MILLION = 1024 * 1024;

    @Override
    public boolean test(String value) {
      try {
        if (value.length() > ONE_MILLION) {
          log.info("provided json string length {} is greater than limit of {} for parsing",
              value.length(), ONE_MILLION);
          return false;
        }
        jsonMapper.readTree(value);
        return true;
      } catch (IOException ex) {
        // any parse failure (malformed json, duplicate keys, trailing tokens) means invalid
        return false;
      }
    }
  }

  /**
   * Builds a membership predicate: the value is valid iff it equals one of {@code allowedSet}
   * (null entries permit a null value; comparison is lowercased when not case sensitive).
   */
  private static Predicate<String> in(final boolean caseSensitive, final String... allowedSet) {
    if (caseSensitive) {
      return x -> Arrays.stream(allowedSet)
          .anyMatch(y -> (x == null && y == null) || (x != null && x.equals(y)));
    } else {
      Function<String,String> toLower = x -> x == null ? null : x.toLowerCase();
      return x -> Arrays.stream(allowedSet).map(toLower)
          .anyMatch(y -> (x == null && y == null) || (x != null && toLower.apply(x).equals(y)));
    }
  }

  /**
   * Builds a predicate accepting a numeric value within [lowerBound, upperBound] followed by one
   * of the allowed unit suffixes; a null value is always accepted (property unset).
   */
  private static Predicate<String> boundedUnits(final long lowerBound, final long upperBound,
      final boolean caseSensitive, final String... suffixes) {
    Predicate<String> suffixCheck = new HasSuffix(caseSensitive, suffixes);
    return x -> x == null
        || (suffixCheck.test(x) && new Bounds(lowerBound, upperBound).test(stripUnits.apply(x)));
  }

  // Matches the trailing run of non-digit characters (the unit suffix), possibly empty.
  private static final Pattern SUFFIX_REGEX = Pattern.compile("[^\\d]*$");
  // Removes the unit suffix, leaving only the numeric portion for bounds checking.
  private static final Function<String,String> stripUnits =
      x -> x == null ? null : SUFFIX_REGEX.matcher(x.trim()).replaceAll("");

  /** Accepts input whose trailing non-digit suffix is one of the allowed unit strings. */
  private static class HasSuffix implements Predicate<String> {
    private final Predicate<String> p;

    public HasSuffix(final boolean caseSensitive, final String... suffixes) {
      p = in(caseSensitive, suffixes);
    }

    @Override
    public boolean test(final String input) {
      requireNonNull(input);
      Matcher m = SUFFIX_REGEX.matcher(input);
      if (m.find()) {
        if (m.groupCount() != 0) {
          // SUFFIX_REGEX has no capture groups; anything else indicates a programming error
          throw new AssertionError(m.groupCount());
        }
        return p.test(m.group());
      } else {
        return true;
      }
    }
  }

  /** Accepts a non-negative double, optionally suffixed with '%'; null is always accepted. */
  private static class FractionPredicate implements Predicate<String> {
    @Override
    public boolean test(final String input) {
      if (input == null) {
        return true;
      }
      try {
        double d;
        if (!input.isEmpty() && input.charAt(input.length() - 1) == '%') {
          d = Double.parseDouble(input.substring(0, input.length() - 1));
        } else {
          d = Double.parseDouble(input);
        }
        return d >= 0;
      } catch (NumberFormatException e) {
        return false;
      }
    }
  }

  /** Accepts a long within configurable (optionally exclusive) bounds; null is always accepted. */
  private static class Bounds implements Predicate<String> {
    private final long lowerBound, upperBound;
    private final boolean lowerInclusive, upperInclusive;

    public Bounds(final long lowerBound, final long upperBound) {
      this(lowerBound, true, upperBound, true);
    }

    public Bounds(final long lowerBound, final boolean lowerInclusive, final long upperBound,
        final boolean upperInclusive) {
      this.lowerBound = lowerBound;
      this.lowerInclusive = lowerInclusive;
      this.upperBound = upperBound;
      this.upperInclusive = upperInclusive;
    }

    @Override
    public boolean test(final String input) {
      if (input == null) {
        return true;
      }
      long number;
      try {
        number = Long.parseLong(input);
      } catch (NumberFormatException e) {
        return false;
      }
      if (number < lowerBound || (!lowerInclusive && number == lowerBound)) {
        return false;
      }
      return number <= upperBound && (upperInclusive || number != upperBound);
    }
  }

  /** Accepts input matching a regex (DOTALL by default); null is always accepted. */
  private static class Matches implements Predicate<String> {
    protected final Pattern pattern;

    public Matches(final String pattern) {
      this(pattern, Pattern.DOTALL);
    }

    public Matches(final String pattern, int flags) {
      this(Pattern.compile(requireNonNull(pattern), flags));
    }

    public Matches(final Pattern pattern) {
      requireNonNull(pattern);
      this.pattern = pattern;
    }

    @Override
    public boolean test(final String input) {
      // TODO when the input is null, it just means that the property wasn't set
      // we can add checks for not null for required properties with
      // Predicates.and(Predicates.notNull(), ...),
      // or we can stop assuming that null is always okay for a Matches predicate, and do that
      // explicitly with Predicates.or(Predicates.isNull(), ...)
      return input == null || pattern.matcher(input).matches();
    }
  }

  /** Accepts an "M-N" port range whose endpoints both fall within 1024-65535 and M <= N. */
  public static class PortRange extends Matches {
    public static final Range<Integer> VALID_RANGE = Range.between(1024, 65535);

    public PortRange(final String pattern) {
      super(pattern);
    }

    @Override
    public boolean test(final String input) {
      if (super.test(input)) {
        try {
          PortRange.parse(input);
          return true;
        } catch (IllegalArgumentException e) {
          return false;
        }
      } else {
        return false;
      }
    }

    /**
     * Parses an "M-N" port range into the inclusive stream of ports M..N.
     *
     * @throws IllegalArgumentException if the notation is wrong or the endpoints are out of range
     */
    public static IntStream parse(String portRange) {
      int idx = portRange.indexOf('-');
      if (idx != -1) {
        int low = Integer.parseInt(portRange.substring(0, idx));
        int high = Integer.parseInt(portRange.substring(idx + 1));
        if (!VALID_RANGE.contains(low) || !VALID_RANGE.contains(high) || low > high) {
          throw new IllegalArgumentException(
              "Invalid port range specified, only 1024 to 65535 supported.");
        }
        return IntStream.rangeClosed(low, high);
      }
      throw new IllegalArgumentException(
          "Invalid port range specification, must use M-N notation.");
    }
  }
}
| 9,957 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ConfigCheckUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.util.Map.Entry;
import java.util.Objects;
import org.apache.accumulo.core.spi.crypto.CryptoServiceFactory;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
 * A utility class for validating {@link AccumuloConfiguration} instances.
 */
// NOTE(review): all members are static; consider adding a private constructor to prevent
// instantiation (would be a minor API change, so left as-is here).
public class ConfigCheckUtil {
  private static final Logger log = LoggerFactory.getLogger(ConfigCheckUtil.class);
  // Common prefix for all fatal/warn messages emitted by this checker.
  private static final String PREFIX = "BAD CONFIG ";

  /**
   * Validates the given configuration entries. A valid configuration contains only valid properties
   * (i.e., defined or otherwise valid) that are not prefixes and whose values are formatted
   * correctly for their property types. A valid configuration also contains a value for property
   * {@link Property#INSTANCE_ZK_TIMEOUT} within a valid range.
   *
   * @param entries iterable through configuration keys and values
   * @param source the namespace, table id, site or system config where for diagnostic messages
   * @throws ConfigCheckException if a fatal configuration error is found
   */
  public static void validate(Iterable<Entry<String,String>> entries, @NonNull String source) {
    String instanceZkTimeoutValue = null;
    for (Entry<String,String> entry : entries) {
      String key = entry.getKey();
      String value = entry.getValue();
      Property prop = Property.getPropertyByKey(entry.getKey());
      if (prop == null && Property.isValidPropertyKey(key)) {
        continue; // unknown valid property (i.e. has proper prefix)
      } else if (prop == null) {
        // unknown key is only a warning; a prefix key or malformed value is fatal
        log.warn(PREFIX + "unrecognized property key ({}) for {}", key, source);
      } else if (prop.getType() == PropertyType.PREFIX) {
        fatal(PREFIX + "incomplete property key (" + key + ") for " + source);
      } else if (!prop.getType().isValidFormat(value)) {
        fatal(PREFIX + "improperly formatted value for key (" + key + ", type=" + prop.getType()
            + ") : " + value + " for " + source);
      }
      if (key.equals(Property.INSTANCE_ZK_TIMEOUT.getKey())) {
        // remember the value; range-checked after the loop
        instanceZkTimeoutValue = value;
      }
      // If the block size or block size index is configured to be too large, we throw an exception
      // to avoid potentially corrupting RFiles later
      if (key.equals(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey())
          || key.equals(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey())) {
        long bsize = ConfigurationTypeHelper.getFixedMemoryAsBytes(value);
        Preconditions.checkArgument(bsize > 0 && bsize < Integer.MAX_VALUE, key
            + " must be greater than 0 and less than " + Integer.MAX_VALUE + " but was: " + bsize);
      }
      if (key.equals(Property.INSTANCE_CRYPTO_FACTORY.getKey())) {
        // crypto factory must name a loadable class implementing CryptoServiceFactory
        String cryptoStrategy = Objects.requireNonNull(value);
        verifyValidClassName(key, cryptoStrategy, CryptoServiceFactory.class);
      }
    }
    if (instanceZkTimeoutValue != null) {
      // ZK timeout must be between 1s and 5min
      checkTimeDuration(Property.INSTANCE_ZK_TIMEOUT, instanceZkTimeoutValue,
          new CheckTimeDurationBetween(1000, 300000));
    }
  }

  // Strategy for validating a time-duration property value (in milliseconds).
  private interface CheckTimeDuration {
    boolean check(long propVal);

    String getDescription(Property prop);
  }

  // Inclusive [min, max] range check; constructor arguments may be given in either order.
  private static class CheckTimeDurationBetween implements CheckTimeDuration {
    long min, max;

    CheckTimeDurationBetween(long x, long y) {
      min = Math.min(x, y);
      max = Math.max(x, y);
    }

    @Override
    public boolean check(long propVal) {
      return propVal >= min && propVal <= max;
    }

    @Override
    public String getDescription(Property prop) {
      return "ensure " + min + " <= " + prop + " <= " + max;
    }
  }

  // Parses a TIMEDURATION value and fails fatally when it is outside the checker's range.
  private static void checkTimeDuration(Property prop, String value, CheckTimeDuration chk) {
    verifyPropertyTypes(PropertyType.TIMEDURATION, prop);
    if (!chk.check(ConfigurationTypeHelper.getTimeInMillis(value))) {
      fatal(PREFIX + chk.getDescription(prop));
    }
  }

  // Sanity check that each property actually has the expected type (guards internal misuse).
  private static void verifyPropertyTypes(PropertyType type, Property... properties) {
    for (Property prop : properties) {
      if (prop.getType() != type) {
        fatal("Unexpected property type (" + prop.getType() + " != " + type + ")");
      }
    }
  }

  /**
   * The exception thrown when {@link ConfigCheckUtil#validate(Iterable, String)} fails.
   */
  public static class ConfigCheckException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    /**
     * Creates a new exception with the given message.
     */
    public ConfigCheckException(String msg) {
      super(msg);
    }
  }

  // Logs the message at error level and aborts validation by throwing ConfigCheckException.
  private static void fatal(String msg) {
    // ACCUMULO-3651 Level changed from fatal to error and FATAL added to message for slf4j
    // compatibility
    log.error("FATAL: {}", msg);
    throw new ConfigCheckException(msg);
  }

  /**
   * Verifies a configured option is a legal class and has a required base class.
   *
   * @param confOption The Property key name
   * @param className The Property value, the string representation of a class to be loaded
   * @param requiredBaseClass The base class required for the className
   */
  private static void verifyValidClassName(String confOption, String className,
      Class<?> requiredBaseClass) {
    try {
      ConfigurationTypeHelper.getClassInstance(null, className, requiredBaseClass);
    } catch (ReflectiveOperationException e) {
      fatal(confOption + " has an invalid class name: " + className);
    } catch (ClassCastException e) {
      fatal(confOption + " must implement " + requiredBaseClass
          + ", but the configured class does not: " + className);
    }
  }
}
| 9,958 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/ConfigurationTypeHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.HOURS;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ConfigurationTypeHelper {
private static final Logger log = LoggerFactory.getLogger(ConfigurationTypeHelper.class);
/**
 * Interprets a string specifying bytes. A bytes type is specified as a long integer followed by
 * an optional B (bytes), K (KB), M (MB), or G (GB).
 *
 * @param str String value
 * @return interpreted memory size in bytes
 * @throws IllegalArgumentException if the string is not a number with an optional valid suffix
 */
public static long getFixedMemoryAsBytes(String str) {
  char unit = str.charAt(str.length() - 1);
  if (unit == 'b') {
    // lowercase 'b' could be mistaken for "bits"; warn that it is treated as bytes
    log.warn(
        "The 'b' in {} is being considered as bytes. Setting memory by bits is not supported",
        str);
  }
  try {
    // position in this string determines the power-of-1024 shift: B=0, K=10, M=20, G=30
    int suffixIndex = "BKMG".indexOf(Character.toUpperCase(unit));
    if (suffixIndex < 0) {
      // no recognized suffix; the whole string must be a plain long
      return Long.parseLong(str);
    }
    return Long.parseLong(str.substring(0, str.length() - 1)) << (10 * suffixIndex);
  } catch (Exception ex) {
    throw new IllegalArgumentException(
        "The value '" + str + "' is not a valid memory setting. A valid value would a number "
            + "possibly followed by an optional 'G', 'M', 'K', or 'B'.");
  }
}
/**
 * Interprets a string specifying a Memory type which is specified as a long integer followed by
 * an optional B (bytes), K (KB), M (MB), G (GB) or % (percentage).
 *
 * @param str String value
 * @return interpreted memory size in bytes
 * @throws IllegalArgumentException if the string is not a valid memory setting
 */
public static long getMemoryAsBytes(String str) {
  if (str.charAt(str.length() - 1) == '%') {
    try {
      int percent = Integer.parseInt(str.substring(0, str.length() - 1));
      if (percent <= 0 || percent >= 100) {
        // only strictly-between-0-and-100 percentages are meaningful
        throw new IllegalArgumentException(
            "The value '" + str + "' is not a valid memory setting.");
      }
      // percentage of the JVM's max heap (-Xmx)
      return Runtime.getRuntime().maxMemory() * percent / 100;
    } catch (Exception ex) {
      throw new IllegalArgumentException(
          "The value '" + str + "' is not a valid memory setting.");
    }
  }
  // no '%' suffix: delegate to the fixed-size parser (B/K/M/G)
  return getFixedMemoryAsBytes(str);
}
/**
 * Interprets a string specifying a time duration. A time duration is specified as a long integer
 * followed by an optional d (days), h (hours), m (minutes), s (seconds), or ms (milliseconds). A
 * value without a unit is interpreted as seconds.
 *
 * @param str string value
 * @return interpreted time duration in milliseconds
 */
public static long getTimeInMillis(String str) {
  TimeUnit unit;
  int suffixLen;
  if (str.endsWith("ms")) {
    // check "ms" before single-char suffixes so it is not misread as minutes/seconds
    unit = MILLISECONDS;
    suffixLen = 2;
  } else {
    char last = str.charAt(str.length() - 1);
    suffixLen = 1;
    if (last == 'd') {
      unit = DAYS;
    } else if (last == 'h') {
      unit = HOURS;
    } else if (last == 'm') {
      unit = MINUTES;
    } else if (last == 's') {
      unit = SECONDS;
    } else {
      // no recognized suffix: the entire string is a number of seconds
      unit = SECONDS;
      suffixLen = 0;
    }
  }
  return unit.toMillis(Long.parseLong(str.substring(0, str.length() - suffixLen)));
}
/**
* Interprets a string specifying a fraction. A fraction is specified as a double. An optional %
* at the end signifies a percentage.
*
* @param str string value
* @return interpreted fraction as a decimal value
*/
public static double getFraction(String str) {
if (!str.isEmpty() && str.charAt(str.length() - 1) == '%') {
return Double.parseDouble(str.substring(0, str.length() - 1)) / 100.0;
}
return Double.parseDouble(str);
}
  // This is not a cache for loaded classes, just a way to avoid spamming the debug log:
  // getClassInstance only logs "Loaded class" when the mapping for a class name changes.
  private static Map<String,Class<?>> loaded = Collections.synchronizedMap(new HashMap<>());
/**
* Loads a class in the given classloader context, suppressing any exceptions, and optionally
* providing a default instance to use.
*
* @param context the per-table context, can be null
* @param clazzName the name of the class to load
* @param base the type of the class
* @param defaultInstance a default instance if the class cannot be loaded
* @return a new instance of the class, or the defaultInstance
*/
public static <T> T getClassInstance(String context, String clazzName, Class<T> base,
T defaultInstance) {
T instance = null;
try {
instance = getClassInstance(context, clazzName, base);
} catch (RuntimeException | ReflectiveOperationException e) {
log.error("Failed to load class {} in classloader context {}", clazzName, context, e);
}
if (instance == null && defaultInstance != null) {
log.info("Using default class {}", defaultInstance.getClass().getName());
instance = defaultInstance;
}
return instance;
}
  /**
   * Loads a class in the given classloader context.
   *
   * @param context the per-table context, can be null
   * @param clazzName the name of the class to load
   * @param base the type of the class
   * @return a new instance of the class
   * @throws ReflectiveOperationException if the class cannot be loaded or instantiated via its
   *         no-arg constructor
   */
  public static <T> T getClassInstance(String context, String clazzName, Class<T> base)
      throws ReflectiveOperationException {
    T instance;
    Class<? extends T> clazz = ClassLoaderUtil.loadClass(context, clazzName, base);
    instance = clazz.getDeclaredConstructor().newInstance();
    // reference comparison is intentional: the same class name loaded by a different classloader
    // yields a different Class object, and we want to log that re-load
    if (loaded.put(clazzName, clazz) != clazz) {
      log.debug("Loaded class : {}", clazzName);
    }
    return instance;
  }
/**
* Get the number of threads from string property. If the value ends with C, then it will be
* multiplied by the number of cores.
*/
public static int getNumThreads(String threads) {
if (threads == null) {
threads = ClientProperty.BULK_LOAD_THREADS.getDefaultValue();
}
int nThreads;
if (threads.toUpperCase().endsWith("C")) {
nThreads = Runtime.getRuntime().availableProcessors()
* Integer.parseInt(threads.substring(0, threads.length() - 1));
} else {
nThreads = Integer.parseInt(threads);
}
return nThreads;
}
}
| 9,959 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/AccumuloConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static org.apache.accumulo.core.conf.Property.GENERAL_ARBITRARY_PROP_PREFIX;
import static org.apache.accumulo.core.conf.Property.INSTANCE_CRYPTO_PREFIX;
import static org.apache.accumulo.core.conf.Property.TABLE_CRYPTO_PREFIX;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.apache.accumulo.core.conf.PropertyType.PortRange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
/**
* A configuration object.
*/
public abstract class AccumuloConfiguration implements Iterable<Entry<String,String>> {
  // immutable snapshot of the properties under one prefix, tagged with the configuration update
  // count the snapshot was taken at (stale when the counts differ)
  private static class PrefixProps {
    final long updateCount;
    final Map<String,String> props;
    PrefixProps(Map<String,String> props, long updateCount) {
      this.updateCount = updateCount;
      this.props = props;
    }
  }
  // volatile so readers see a consistent map without locking; writers replace it wholesale
  private volatile EnumMap<Property,PrefixProps> cachedPrefixProps = new EnumMap<>(Property.class);
  // serializes recomputation of the prefix cache in getAllPropertiesWithPrefix
  private final Lock prefixCacheUpdateLock = new ReentrantLock();
  private static final Logger log = LoggerFactory.getLogger(AccumuloConfiguration.class);
/**
* Gets a property value from this configuration.
*
* <p>
* Note: this is inefficient for values that are not a {@link Property}. For retrieving multiple
* properties, use {@link #getProperties(Map, Predicate)} with a custom filter.
*
* @param property property to get
* @return property value
*/
public String get(String property) {
Property p = Property.getPropertyByKey(property);
if (p != null) {
return get(p);
} else {
Map<String,String> propMap = new HashMap<>(1);
getProperties(propMap, key -> Objects.equals(property, key));
return propMap.get(property);
}
}
  /**
   * Gets a property value from this configuration. This is the single abstract accessor that the
   * typed getters (such as {@link #getCount(Property)} and {@link #getBoolean(Property)}) build
   * on.
   *
   * @param property property to get
   * @return property value
   */
  public abstract String get(Property property);
/**
* Given a property that is not deprecated and an ordered list of deprecated properties, determine
* which one to use based on which is set by the user in the configuration. If the non-deprecated
* property is set, use that. Otherwise, use the first deprecated property that is set. If no
* deprecated properties are set, use the non-deprecated property by default, even though it is
* not set (since it is not set, it will resolve to its default value). Since the deprecated
* properties are checked in order, newer properties should be on the left, replacing older
* properties on the right, so if a newer property is set, it will be selected over any older
* property that may also be set.
*/
public Property resolve(Property property, Property... deprecated) {
if (property.isDeprecated()) {
throw new IllegalArgumentException("Unexpected deprecated " + property.name());
}
for (Property p : deprecated) {
if (!p.isDeprecated()) {
var notDeprecated = Stream.of(deprecated).filter(Predicate.not(Property::isDeprecated))
.map(Property::name).collect(Collectors.toList());
throw new IllegalArgumentException("Unexpected non-deprecated " + notDeprecated);
}
}
return isPropertySet(property) ? property
: Stream.of(deprecated).filter(this::isPropertySet).findFirst().orElse(property);
}
  /**
   * Returns property key/value pairs in this configuration. The pairs include those defined in this
   * configuration which pass the given filter, and those supplied from the parent configuration
   * which are not included from here.
   *
   * @param props properties object to populate
   * @param filter filter for accepting properties from this configuration
   */
  public abstract void getProperties(Map<String,String> props, Predicate<String> filter);
  /**
   * Returns an iterator over property key/value pairs in this configuration. Some implementations
   * may elect to omit properties.
   *
   * @return iterator over properties, sorted by key (backed by a TreeMap snapshot)
   */
  @Override
  public Iterator<Entry<String,String>> iterator() {
    Predicate<String> all = x -> true;
    TreeMap<String,String> entries = new TreeMap<>();
    getProperties(entries, all);
    return entries.entrySet().iterator();
  }
  // guards the typed getters: fails fast (and logs) when a getter is called with a Property
  // whose declared type does not match
  private static void checkType(Property property, PropertyType type) {
    if (!property.getType().equals(type)) {
      String msg = "Configuration method intended for type " + type + " called with a "
          + property.getType() + " argument (" + property.getKey() + ")";
      IllegalArgumentException err = new IllegalArgumentException(msg);
      log.error(msg, err);
      throw err;
    }
  }
  /**
   * Each time configuration changes, this counter should increase. Anything that caches information
   * that is derived from configuration can use this method to know when to update.
   */
  public long getUpdateCount() {
    return 0;
  }
  /**
   * Gets all properties under the given prefix in this configuration.
   *
   * @param property prefix property, must be of type PropertyType.PREFIX
   * @return a map of property keys to values
   * @throws IllegalArgumentException if property is not a prefix
   */
  public Map<String,String> getAllPropertiesWithPrefix(Property property) {
    checkType(property, PropertyType.PREFIX);
    PrefixProps prefixProps = cachedPrefixProps.get(property);
    long currentCount = getUpdateCount();
    // check-then-recheck under the lock: the volatile cachedPrefixProps map is only ever
    // replaced wholesale, so unlocked reads above are safe
    if (prefixProps == null || prefixProps.updateCount != currentCount) {
      prefixCacheUpdateLock.lock();
      try {
        // Very important that update count is read before getting properties. Also only read it
        // once.
        long updateCount = getUpdateCount();
        prefixProps = cachedPrefixProps.get(property);
        if (prefixProps == null || prefixProps.updateCount != updateCount) {
          Map<String,String> propMap = new HashMap<>();
          // The reason this caching exists is to avoid repeatedly making this expensive call.
          getProperties(propMap, key -> key.startsWith(property.getKey()));
          propMap = Map.copyOf(propMap);
          // So that locking is not needed when reading from enum map, always create a new one.
          // Construct and populate map using a local var so its not visible
          // until ready.
          EnumMap<Property,PrefixProps> localPrefixes = new EnumMap<>(Property.class);
          // carry forward any other cached prefixes
          localPrefixes.putAll(cachedPrefixProps);
          // put the updates
          prefixProps = new PrefixProps(propMap, updateCount);
          localPrefixes.put(property, prefixProps);
          // make the newly constructed map available
          cachedPrefixProps = localPrefixes;
        }
      } finally {
        prefixCacheUpdateLock.unlock();
      }
    }
    return prefixProps.props;
  }
public Map<String,String> getAllPropertiesWithPrefixStripped(Property prefix) {
final var builder = ImmutableMap.<String,String>builder();
getAllPropertiesWithPrefix(prefix).forEach((k, v) -> {
String optKey = k.substring(prefix.getKey().length());
builder.put(optKey, v);
});
return builder.build();
}
public Map<String,String> getAllCryptoProperties() {
Map<String,String> allProps = new HashMap<>();
allProps.putAll(getAllPropertiesWithPrefix(INSTANCE_CRYPTO_PREFIX));
allProps.putAll(getAllPropertiesWithPrefix(GENERAL_ARBITRARY_PROP_PREFIX));
allProps.putAll(getAllPropertiesWithPrefix(TABLE_CRYPTO_PREFIX));
return allProps;
}
/**
* Gets a property of type {@link PropertyType#BYTES} or {@link PropertyType#MEMORY}, interpreting
* the value properly.
*
* @param property Property to get
* @return property value
* @throws IllegalArgumentException if the property is of the wrong type
*/
public long getAsBytes(Property property) {
String memString = get(property);
if (property.getType() == PropertyType.MEMORY) {
return ConfigurationTypeHelper.getMemoryAsBytes(memString);
} else if (property.getType() == PropertyType.BYTES) {
return ConfigurationTypeHelper.getFixedMemoryAsBytes(memString);
} else {
throw new IllegalArgumentException(property.getKey() + " is not of BYTES or MEMORY type");
}
}
  /**
   * Gets a property of type {@link PropertyType#TIMEDURATION}, interpreting the value properly.
   *
   * @param property property to get
   * @return property value in milliseconds
   * @throws IllegalArgumentException if the property is of the wrong type
   */
  public long getTimeInMillis(Property property) {
    checkType(property, PropertyType.TIMEDURATION);
    return ConfigurationTypeHelper.getTimeInMillis(get(property));
  }
  /**
   * Gets a property of type {@link PropertyType#BOOLEAN}, interpreting the value properly (using
   * <code>Boolean.parseBoolean()</code>).
   *
   * @param property property to get
   * @return property value
   * @throws IllegalArgumentException if the property is of the wrong type
   */
  public boolean getBoolean(Property property) {
    checkType(property, PropertyType.BOOLEAN);
    return Boolean.parseBoolean(get(property));
  }
  /**
   * Gets a property of type {@link PropertyType#FRACTION}, interpreting the value properly.
   *
   * @param property property to get
   * @return property value as a decimal fraction
   * @throws IllegalArgumentException if the property is of the wrong type
   */
  public double getFraction(Property property) {
    checkType(property, PropertyType.FRACTION);
    return ConfigurationTypeHelper.getFraction(get(property));
  }
  /**
   * Gets a property of type {@link PropertyType#PORT}, interpreting the value properly (as an
   * integer within the range of non-privileged ports). Consider using
   * {@link #getPortStream(Property)}, if an array is not needed.
   *
   * @param property property to get
   * @return property value
   * @throws IllegalArgumentException if the property is of the wrong type
   */
  public int[] getPort(Property property) {
    // thin array-materializing wrapper over getPortStream
    return getPortStream(property).toArray();
  }
/**
* Same as {@link #getPort(Property)}, but as an {@link IntStream}.
*/
public IntStream getPortStream(Property property) {
checkType(property, PropertyType.PORT);
String portString = get(property);
try {
return PortRange.parse(portString);
} catch (IllegalArgumentException e) {
try {
int port = Integer.parseInt(portString);
if (port == 0 || PortRange.VALID_RANGE.contains(port)) {
return IntStream.of(port);
} else {
log.error("Invalid port number {}; Using default {}", port, property.getDefaultValue());
return IntStream.of(Integer.parseInt(property.getDefaultValue()));
}
} catch (NumberFormatException e1) {
throw new IllegalArgumentException("Invalid port syntax. Must be a single positive "
+ "integers or a range (M-N) of positive integers");
}
}
}
  /**
   * Gets a property of type {@link PropertyType#COUNT}, interpreting the value properly (as an
   * integer).
   *
   * @param property property to get
   * @return property value
   * @throws IllegalArgumentException if the property is of the wrong type
   */
  public int getCount(Property property) {
    checkType(property, PropertyType.COUNT);
    String countString = get(property);
    return Integer.parseInt(countString);
  }
  /**
   * Gets a property of type {@link PropertyType#PATH}.
   *
   * @param property property to get
   * @return property value, or null when the property is unset
   * @throws IllegalArgumentException if the property is of the wrong type, or if the value
   *         contains a literal {@code $ACCUMULO_} environment-variable reference
   */
  public String getPath(Property property) {
    checkType(property, PropertyType.PATH);
    String pathString = get(property);
    if (pathString == null) {
      return null;
    }
    if (pathString.contains("$ACCUMULO_")) {
      throw new IllegalArgumentException("Environment variable interpolation not supported here. "
          + "Consider using '${env:ACCUMULO_HOME}' or similar in your configuration file.");
    }
    return pathString;
  }
  /**
   * Gets the maximum number of files per tablet from this configuration.
   *
   * @return maximum number of files per tablet
   * @see Property#TABLE_FILE_MAX
   * @see Property#TSERV_SCAN_MAX_OPENFILES
   */
  public int getMaxFilesPerTablet() {
    int maxFilesPerTablet = getCount(Property.TABLE_FILE_MAX);
    if (maxFilesPerTablet <= 0) {
      // non-positive TABLE_FILE_MAX means: derive from the scan open-file limit, reserving one
      maxFilesPerTablet = getCount(Property.TSERV_SCAN_MAX_OPENFILES) - 1;
      log.debug("Max files per tablet {}", maxFilesPerTablet);
    }
    return maxFilesPerTablet;
  }
public class ScanExecutorConfig {
public final String name;
public final int maxThreads;
public final OptionalInt priority;
public final Optional<String> prioritizerClass;
public final Map<String,String> prioritizerOpts;
public final boolean isScanServer;
public ScanExecutorConfig(String name, int maxThreads, OptionalInt priority,
Optional<String> comparatorFactory, Map<String,String> comparatorFactoryOpts,
boolean isScanServer) {
this.name = name;
this.maxThreads = maxThreads;
this.priority = priority;
this.prioritizerClass = comparatorFactory;
this.prioritizerOpts = comparatorFactoryOpts;
this.isScanServer = isScanServer;
}
/**
* Re-reads the max threads from the configuration that created this class
*/
public int getCurrentMaxThreads() {
if (isScanServer) {
String prop =
Property.SSERV_SCAN_EXECUTORS_PREFIX.getKey() + name + "." + SCAN_EXEC_THREADS;
String val = getAllPropertiesWithPrefix(Property.SSERV_SCAN_EXECUTORS_PREFIX).get(prop);
return Integer.parseInt(val);
} else {
String prop =
Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + name + "." + SCAN_EXEC_THREADS;
String val = getAllPropertiesWithPrefix(Property.TSERV_SCAN_EXECUTORS_PREFIX).get(prop);
return Integer.parseInt(val);
}
}
}
  /**
   * @param prop Property to check
   * @return true if the given property has explicitly been set by a user, false otherwise
   */
  public abstract boolean isPropertySet(Property prop);
  // pairs a derived object with the configuration update count it was computed from; used by
  // DeriverImpl to detect staleness
  private static class RefCount<T> {
    T obj;
    long count;
    RefCount(long c, T r) {
      this.count = c;
      this.obj = r;
    }
  }
  private class DeriverImpl<T> implements Deriver<T> {
    // holds the last (updateCount, derived object) pair; replaced opportunistically
    private final AtomicReference<RefCount<T>> refref = new AtomicReference<>();
    private final Function<AccumuloConfiguration,T> converter;
    DeriverImpl(Function<AccumuloConfiguration,T> converter) {
      this.converter = converter;
    }
    /**
     * This method was written with the goal of avoiding thread contention and minimizing
     * recomputation. Configuration can be accessed frequently by many threads. Ideally, threads
     * working on unrelated tasks would not impede each other because of accessing config.
     *
     * To avoid thread contention, synchronization and needless calls to compare and set were
     * avoided. For example if 100 threads are all calling compare and set in a loop this could
     * cause significant contention.
     */
    @Override
    public T derive() {
      // very important to obtain this before possibly recomputing object
      long uc = getUpdateCount();
      RefCount<T> rc = refref.get();
      if (rc == null || rc.count != uc) {
        T newObj = converter.apply(AccumuloConfiguration.this);
        // very important to record the update count that was obtained before recomputing.
        RefCount<T> nrc = new RefCount<>(uc, newObj);
        /*
         * The return value of compare and set is intentionally ignored here. This code could loop
         * calling compare and set inorder to avoid returning a stale object. However after this
         * function returns, the object could immediately become stale. So in the big picture stale
         * objects can not be prevented. Looping here could cause thread contention, but it would
         * not solve the overall stale object problem. That is why the return value was ignored. The
         * following line is a least effort attempt to make the result of this recomputation
         * available to the next caller.
         */
        refref.compareAndSet(rc, nrc);
        return nrc.obj;
      }
      return rc.obj;
    }
  }
  /**
   * Automatically regenerates an object whenever configuration changes. When configuration is not
   * changing, keeps returning the same object. Implementations should be thread safe and eventually
   * consistent. See {@link AccumuloConfiguration#newDeriver(Function)}
   */
  public interface Deriver<T> {
    T derive();
  }
  /**
   * Enables deriving an object from configuration and automatically deriving a new object any time
   * configuration changes.
   *
   * @param converter This function is used to create an object from configuration. A reference to
   *        this function will be kept and called by the returned deriver.
   * @return The returned supplier will automatically re-derive the object any time this
   *         configuration changes. When configuration is not changing, the same object is returned.
   *
   */
  public <T> Deriver<T> newDeriver(Function<AccumuloConfiguration,T> converter) {
    return new DeriverImpl<>(converter);
  }
  // per-executor option suffixes used under the scan executor prefixes, i.e.
  // <prefix><executor name>.<suffix>
  private static final String SCAN_EXEC_THREADS = "threads";
  private static final String SCAN_EXEC_PRIORITY = "priority";
  private static final String SCAN_EXEC_PRIORITIZER = "prioritizer";
  private static final String SCAN_EXEC_PRIORITIZER_OPTS = "prioritizer.opts.";
public Collection<ScanExecutorConfig> getScanExecutors(boolean isScanServer) {
Property prefix =
isScanServer ? Property.SSERV_SCAN_EXECUTORS_PREFIX : Property.TSERV_SCAN_EXECUTORS_PREFIX;
Map<String,Map<String,String>> propsByName = new HashMap<>();
List<ScanExecutorConfig> scanResources = new ArrayList<>();
for (Entry<String,String> entry : getAllPropertiesWithPrefix(prefix).entrySet()) {
String suffix = entry.getKey().substring(prefix.getKey().length());
String[] tokens = suffix.split("\\.", 2);
String name = tokens[0];
propsByName.computeIfAbsent(name, k -> new HashMap<>()).put(tokens[1], entry.getValue());
}
for (Entry<String,Map<String,String>> entry : propsByName.entrySet()) {
String name = entry.getKey();
Integer threads = null;
Integer prio = null;
String prioritizerClass = null;
Map<String,String> prioritizerOpts = new HashMap<>();
for (Entry<String,String> subEntry : entry.getValue().entrySet()) {
String opt = subEntry.getKey();
String val = subEntry.getValue();
if (opt.equals(SCAN_EXEC_THREADS)) {
threads = Integer.parseInt(val);
} else if (opt.equals(SCAN_EXEC_PRIORITY)) {
prio = Integer.parseInt(val);
} else if (opt.equals(SCAN_EXEC_PRIORITIZER)) {
prioritizerClass = val;
} else if (opt.startsWith(SCAN_EXEC_PRIORITIZER_OPTS)) {
String key = opt.substring(SCAN_EXEC_PRIORITIZER_OPTS.length());
if (key.isEmpty()) {
throw new IllegalStateException("Invalid scan executor option : " + opt);
}
prioritizerOpts.put(key, val);
} else {
throw new IllegalStateException("Unknown scan executor option : " + opt);
}
}
Preconditions.checkArgument(threads != null && threads > 0,
"Scan resource %s incorrectly specified threads", name);
scanResources.add(new ScanExecutorConfig(name, threads,
prio == null ? OptionalInt.empty() : OptionalInt.of(prio),
Optional.ofNullable(prioritizerClass), prioritizerOpts, isScanServer));
}
return scanResources;
}
  /**
   * Invalidates the <code>ZooCache</code> used for storage and quick retrieval of properties for
   * this configuration. No-op by default; subclasses backed by ZooKeeper override this.
   */
  public void invalidateCache() {}
  /**
   * get a parent configuration or null if it does not exist.
   *
   * @since 2.1.0
   */
  public AccumuloConfiguration getParent() {
    return null;
  }
  /**
   * Returns a sequential stream over the entries produced by {@link #iterator()}.
   */
  public Stream<Entry<String,String>> stream() {
    return StreamSupport.stream(this.spliterator(), false);
  }
}
| 9,960 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/conf/cluster/ClusterConfigParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf.cluster;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.yaml.snakeyaml.Yaml;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class ClusterConfigParser {
  // shell variable assignment template: NAME="value" followed by a newline
  private static final String PROPERTY_FORMAT = "%s=\"%s\"%n";
  // sections emitted as <SECTION>_HOSTS; manager and tserver are mandatory (see
  // outputShellVariables)
  private static final String[] SECTIONS = new String[] {"manager", "monitor", "gc", "tserver"};
  private static final Set<String> VALID_CONFIG_KEYS = Set.of("manager", "monitor", "gc", "tserver",
      "tservers_per_host", "sservers_per_host", "compaction.coordinator");
  private static final Set<String> VALID_CONFIG_PREFIXES =
      Set.of("compaction.compactor.", "sserver.");
  // a section is valid if it is an exact known key or starts with a known prefix
  private static final Predicate<String> VALID_CONFIG_SECTIONS =
      section -> VALID_CONFIG_KEYS.contains(section)
          || VALID_CONFIG_PREFIXES.stream().anyMatch(section::startsWith);
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
public static Map<String,String> parseConfiguration(String configFile) throws IOException {
Map<String,String> results = new HashMap<>();
try (InputStream fis = Files.newInputStream(Paths.get(configFile), StandardOpenOption.READ)) {
Yaml y = new Yaml();
Map<String,Object> config = y.load(fis);
config.forEach((k, v) -> flatten("", k, v, results));
}
return results;
}
private static String addTheDot(String key) {
return key.endsWith(".") ? "" : ".";
}
  // Recursively flattens a parsed YAML tree into dotted-key/value string pairs in 'results'.
  // Strings and Numbers become leaf values; Maps recurse with the key appended to the parent
  // path; Lists of strings are serialized as a single space-joined value.
  private static void flatten(String parentKey, String key, Object value,
      Map<String,String> results) {
    String parent =
        (parentKey == null || parentKey.equals("")) ? "" : parentKey + addTheDot(parentKey);
    if (value instanceof String) {
      results.put(parent + key, (String) value);
    } else if (value instanceof List) {
      ((List<?>) value).forEach(l -> {
        if (l instanceof String) {
          // remove the [] at the ends of toString()
          // note: this serializes the ENTIRE list (value, not the element l) and re-puts the same
          // joined string under the same key once per string element — net effect is a single
          // space-joined value
          String val = value.toString();
          results.put(parent + key, val.substring(1, val.length() - 1).replace(", ", " "));
        } else {
          flatten(parent, key, l, results);
        }
      });
    } else if (value instanceof Map) {
      @SuppressWarnings("unchecked")
      Map<String,Object> map = (Map<String,Object>) value;
      map.forEach((k, v) -> flatten(parent + key, k, v, results));
    } else if (value instanceof Number) {
      results.put(parent + key, value.toString());
    } else {
      throw new IllegalStateException("Unhandled object type: " + value.getClass());
    }
  }
  // Emits the flattened cluster config as shell variable assignments (e.g. MANAGER_HOSTS="...")
  // on 'out'. Throws if an unknown section is present or if manager/tserver are missing.
  public static void outputShellVariables(Map<String,String> config, PrintStream out) {
    // find invalid config sections and point the user to the first one
    config.keySet().stream().filter(VALID_CONFIG_SECTIONS.negate()).findFirst()
        .ifPresent(section -> {
          throw new IllegalArgumentException("Unknown configuration section : " + section);
        });
    for (String section : SECTIONS) {
      if (config.containsKey(section)) {
        // NOTE(review): toUpperCase() uses the default locale; the section names here are plain
        // ASCII so this is safe, but toUpperCase(Locale.ROOT) would be more defensive
        out.printf(PROPERTY_FORMAT, section.toUpperCase() + "_HOSTS", config.get(section));
      } else {
        if (section.equals("manager") || section.equals("tserver")) {
          throw new IllegalStateException("Required configuration section is missing: " + section);
        }
        System.err.println("WARN: " + section + " is missing");
      }
    }
    if (config.containsKey("compaction.coordinator")) {
      out.printf(PROPERTY_FORMAT, "COORDINATOR_HOSTS", config.get("compaction.coordinator"));
    }
    // one COMPACTOR_HOSTS_<queue> variable per configured compactor queue
    String compactorPrefix = "compaction.compactor.";
    Set<String> compactorQueues =
        config.keySet().stream().filter(k -> k.startsWith(compactorPrefix))
            .map(k -> k.substring(compactorPrefix.length())).collect(Collectors.toSet());
    if (!compactorQueues.isEmpty()) {
      out.printf(PROPERTY_FORMAT, "COMPACTION_QUEUES",
          compactorQueues.stream().collect(Collectors.joining(" ")));
      for (String queue : compactorQueues) {
        out.printf(PROPERTY_FORMAT, "COMPACTOR_HOSTS_" + queue,
            config.get("compaction.compactor." + queue));
      }
    }
    // one SSERVER_HOSTS_<group> variable per configured scan server group
    String sserverPrefix = "sserver.";
    Set<String> sserverGroups = config.keySet().stream().filter(k -> k.startsWith(sserverPrefix))
        .map(k -> k.substring(sserverPrefix.length())).collect(Collectors.toSet());
    if (!sserverGroups.isEmpty()) {
      out.printf(PROPERTY_FORMAT, "SSERVER_GROUPS",
          sserverGroups.stream().collect(Collectors.joining(" ")));
      sserverGroups.forEach(ssg -> out.printf(PROPERTY_FORMAT, "SSERVER_HOSTS_" + ssg,
          config.get(sserverPrefix + ssg)));
    }
    // per-host process counts default to 1, overridable via pre-set shell variables
    String numTservers = config.getOrDefault("tservers_per_host", "1");
    out.print("NUM_TSERVERS=\"${NUM_TSERVERS:=" + numTservers + "}\"\n");
    String numSservers = config.getOrDefault("sservers_per_host", "1");
    out.print("NUM_SSERVERS=\"${NUM_SSERVERS:=" + numSservers + "}\"\n");
    out.flush();
  }
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "Path provided for output file is intentional")
public static void main(String[] args) throws IOException {
if (args == null || args.length < 1 || args.length > 2) {
System.err.println("Usage: ClusterConfigParser <configFile> [<outputFile>]");
System.exit(1);
}
if (args.length == 2) {
// Write to a file instead of System.out if provided as an argument
try (OutputStream os = Files.newOutputStream(Paths.get(args[1]), StandardOpenOption.CREATE);
PrintStream out = new PrintStream(os)) {
outputShellVariables(parseConfiguration(args[0]), new PrintStream(out));
}
} else {
outputShellVariables(parseConfiguration(args[0]), System.out);
}
}
}
| 9,961 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/volume/VolumeImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.volume;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Basic Volume implementation that contains a FileSystem and a base path that should be used within
* that filesystem.
*/
public class VolumeImpl implements Volume {
  private static final Logger log = LoggerFactory.getLogger(VolumeImpl.class);
  private final FileSystem fs;
  // base path within fs, stored without trailing slashes or surrounding whitespace
  private final String basePath;
  private final Configuration hadoopConf;
  /**
   * Creates a volume from a path, resolving the filesystem from the given Hadoop configuration.
   */
  public VolumeImpl(Path path, Configuration hadoopConf) throws IOException {
    this.fs = requireNonNull(path).getFileSystem(requireNonNull(hadoopConf));
    this.basePath = stripTrailingSlashes(path.toUri().getPath());
    this.hadoopConf = hadoopConf;
  }
  /**
   * Creates a volume from an already-resolved filesystem and a base path within it.
   */
  public VolumeImpl(FileSystem fs, String basePath) {
    this.fs = requireNonNull(fs);
    this.basePath = stripTrailingSlashes(requireNonNull(basePath));
    this.hadoopConf = fs.getConf();
  }
// remove any trailing whitespace or slashes
private static String stripTrailingSlashes(String path) {
return path.strip().replaceAll("/*$", "");
}
  @Override
  public FileSystem getFileSystem() {
    // the filesystem backing this volume
    return fs;
  }
  @Override
  public String getBasePath() {
    // base path within the filesystem; never has trailing slashes (see constructor)
    return basePath;
  }
@Override
public boolean containsPath(Path path) {
FileSystem otherFS;
try {
otherFS = requireNonNull(path).getFileSystem(hadoopConf);
} catch (IOException e) {
log.warn("Could not determine filesystem from path: {}", path, e);
return false;
}
return equivalentFileSystems(otherFS) && isAncestorPathOf(path);
}
// same if the only difference is trailing slashes
boolean equivalentFileSystems(FileSystem otherFS) {
return stripTrailingSlashes(fs.getUri().toString())
.equals(stripTrailingSlashes(otherFS.getUri().toString()));
}
// is ancestor if the path portion without the filesystem scheme
// is a subdirectory of this volume's basePath
boolean isAncestorPathOf(Path other) {
String otherPath = other.toUri().getPath().strip();
if (otherPath.startsWith(basePath)) {
String otherRemainingPath = otherPath.substring(basePath.length());
return otherRemainingPath.isEmpty()
|| (otherRemainingPath.startsWith("/") && !otherRemainingPath.contains(".."));
}
return false;
}
@Override
public int hashCode() {
return Objects.hashCode(getFileSystem()) + Objects.hashCode(getBasePath());
}
@Override
public boolean equals(Object o) {
if (o instanceof VolumeImpl) {
VolumeImpl other = (VolumeImpl) o;
return getFileSystem().equals(other.getFileSystem())
&& getBasePath().equals(other.getBasePath());
}
return false;
}
@Override
public String toString() {
return fs.makeQualified(new Path(basePath)).toString();
}
@Override
public Path prefixChild(String pathString) {
String p = requireNonNull(pathString).strip();
p = p.startsWith("/") ? p.substring(1) : p;
String reason;
if (basePath.isBlank()) {
log.error("Basepath is empty. Make sure instance.volumes is set to a correct path");
throw new IllegalArgumentException(
"Accumulo cannot be initialized because basepath is empty. "
+ "This probably means instance.volumes is an incorrect value");
}
if (p.isBlank()) {
return fs.makeQualified(new Path(basePath));
} else if (p.startsWith("/")) {
// check for starting with '//'
reason = "absolute path";
} else if (pathString.contains(":")) {
reason = "qualified path";
} else if (pathString.contains("..")) {
reason = "path contains '..'";
} else {
return fs.makeQualified(new Path(basePath, p));
}
throw new IllegalArgumentException(
String.format("Cannot prefix %s (%s) with volume %s", pathString, reason, this));
}
}
| 9,962 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.volume;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class VolumeConfiguration {

  /**
   * Resolves the {@link FileSystem} responsible for the given path string. A path containing a
   * scheme (i.e. one with a ':') selects its own filesystem; otherwise the configured default
   * filesystem is used.
   */
  public static FileSystem fileSystemForPath(String path, Configuration conf) throws IOException {
    if (path.contains(":")) {
      return new Path(path).getFileSystem(conf);
    }
    return FileSystem.get(conf);
  }

  /**
   * Reads and validates the configured instance volumes, preserving their configured order.
   *
   * @throws IllegalArgumentException if the property is unset or blank, contains duplicate
   *         volumes, or any entry is not a fully qualified, syntactically valid URI
   */
  public static Set<String> getVolumeUris(AccumuloConfiguration conf) {
    String volumes = conf.get(Property.INSTANCE_VOLUMES);
    if (volumes == null || volumes.isBlank()) {
      throw new IllegalArgumentException(
          "Missing required property " + Property.INSTANCE_VOLUMES.getKey());
    }
    String[] volArray = volumes.split(",");
    LinkedHashSet<String> deduplicated = new LinkedHashSet<>();
    for (String volume : volArray) {
      deduplicated.add(normalizeVolume(volume));
    }
    if (deduplicated.size() < volArray.length) {
      throw new IllegalArgumentException(
          Property.INSTANCE_VOLUMES.getKey() + " contains duplicate volumes (" + volumes + ")");
    }
    return deduplicated;
  }

  // require a fully qualified URI and normalize it through java.net.URI
  private static String normalizeVolume(String volume) {
    boolean qualified = volume != null && !volume.isBlank() && volume.contains(":");
    if (!qualified) {
      throw new IllegalArgumentException("Expected fully qualified URI for "
          + Property.INSTANCE_VOLUMES.getKey() + " got " + volume);
    }
    try {
      // pass through URI to unescape hex encoded chars (e.g. convert %2C to "," char)
      return new Path(new URI(volume.strip())).toString();
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(Property.INSTANCE_VOLUMES.getKey() + " contains '" + volume
          + "' which has a syntax error", e);
    }
  }
}
| 9,963 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/volume/Volume.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.volume;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Encapsulates a {@link FileSystem} and a base {@link Path} within that filesystem. This also
 * avoids the necessity to pass around a Configuration.
 */
public interface Volume {

  /**
   * A {@link FileSystem} that Accumulo will use
   */
  FileSystem getFileSystem();

  /**
   * The base path which Accumulo will use within the given {@link FileSystem}
   */
  String getBasePath();

  /**
   * Convert the given child path into a Path that is relative to the base path for this Volume. The
   * supplied path should not include any scheme (such as <code>file:</code> or <code>hdfs:</code>),
   * and should not contain any relative path "breakout" patterns, such as <code>../</code>. If the
   * path begins with a single slash, it will be preserved while prefixing this volume. If it does
   * not begin with a single slash, one will be inserted.
   *
   * @param pathString The suffix to use
   * @return A Path for this Volume with the provided suffix
   */
  Path prefixChild(String pathString);

  /**
   * Determine if the Path is contained in Volume. A Path is considered contained if refers to a
   * location within the base path for this Volume on the same FileSystem. It can be located at the
   * base path, or within any sub-directory. Unqualified paths (those without a file system scheme)
   * will resolve to using the configured Hadoop default file system before comparison. Paths are
   * not considered "contained" within this Volume if they have any relative path "breakout"
   * patterns, such as <code>../</code>.
   *
   * @param path the path to test for containment
   * @return true if the path is on this volume's filesystem and under its base path
   */
  boolean containsPath(Path path);
}
| 9,964 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/AccumuloException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
/**
 * A generic Accumulo Exception for general Accumulo failures.
 */
public class AccumuloException extends Exception {
  private static final long serialVersionUID = 1L;

  /**
   * @param why is the reason for the error being thrown
   */
  public AccumuloException(final String why) {
    super(why);
  }

  /**
   * @param cause is the exception that this exception wraps
   */
  public AccumuloException(final Throwable cause) {
    super(cause);
  }

  /**
   * @param why is the reason for the error being thrown
   * @param cause is the exception that this exception wraps
   */
  public AccumuloException(final String why, final Throwable cause) {
    super(why, cause);
  }
}
| 9,965 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/NamespaceExistsException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
/**
 * Thrown when the namespace specified already exists, and it was expected that it didn't
 */
public class NamespaceExistsException extends Exception {

  private static final long serialVersionUID = 1L;

  // builds the user-facing message, including only the parts that were actually supplied
  private static String buildMessage(String namespaceId, String namespaceName,
      String description) {
    StringBuilder msg = new StringBuilder("Namespace");
    if (namespaceName != null && !namespaceName.isEmpty()) {
      msg.append(" ").append(namespaceName);
    }
    if (namespaceId != null && !namespaceId.isEmpty()) {
      msg.append(" (Id=").append(namespaceId).append(")");
    }
    msg.append(" exists");
    if (description != null && !description.isEmpty()) {
      msg.append(" (").append(description).append(")");
    }
    return msg.toString();
  }

  /**
   * @param namespaceId the internal id of the namespace that exists
   * @param namespaceName the visible name of the namespace that exists
   * @param description the specific reason why it failed
   */
  public NamespaceExistsException(String namespaceId, String namespaceName, String description) {
    super(buildMessage(namespaceId, namespaceName, description));
  }

  /**
   * @param namespaceId the internal id of the namespace that exists
   * @param namespaceName the visible name of the namespace that exists
   * @param description the specific reason why it failed
   * @param cause the exception that caused this failure
   */
  public NamespaceExistsException(String namespaceId, String namespaceName, String description,
      Throwable cause) {
    this(namespaceId, namespaceName, description);
    super.initCause(cause);
  }

  /**
   * @param e constructs an exception from a thrift exception
   */
  public NamespaceExistsException(ThriftTableOperationException e) {
    this(e.getTableId(), e.getTableName(), e.getDescription(), e);
  }
}
| 9,966 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/MutationsRejectedException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.client.security.SecurityErrorCode;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.data.ConstraintViolationSummary;
import org.apache.accumulo.core.data.TabletId;
/**
 * Communicate the failed mutations of a BatchWriter back to the client.
 */
public class MutationsRejectedException extends AccumuloException {
  private static final long serialVersionUID = 1L;

  private final ArrayList<ConstraintViolationSummary> cvsl = new ArrayList<>();
  private final HashMap<TabletId,Set<SecurityErrorCode>> af = new HashMap<>();
  private final HashSet<String> es = new HashSet<>();
  private final int unknownErrors;

  /**
   * Creates Mutations rejected exception
   *
   * @param client AccumuloClient
   * @param cvsList list of constraint violations
   * @param hashMap authorization failures
   * @param serverSideErrors server side errors
   * @param unknownErrors number of unknown errors
   *
   * @since 2.0.0
   */
  public MutationsRejectedException(AccumuloClient client, List<ConstraintViolationSummary> cvsList,
      Map<TabletId,Set<SecurityErrorCode>> hashMap, Collection<String> serverSideErrors,
      int unknownErrors, Throwable cause) {
    super("# constraint violations : " + cvsList.size() + " security codes: "
        + format(hashMap, (ClientContext) client) + " # server errors " + serverSideErrors.size()
        + " # exceptions " + unknownErrors, cause);
    this.cvsl.addAll(cvsList);
    this.af.putAll(hashMap);
    this.es.addAll(serverSideErrors);
    this.unknownErrors = unknownErrors;
  }

  // groups the per-tablet security errors by printable table name for the exception message
  private static String format(Map<TabletId,Set<SecurityErrorCode>> hashMap,
      ClientContext context) {
    Map<String,Set<SecurityErrorCode>> result = new HashMap<>();
    for (Entry<TabletId,Set<SecurityErrorCode>> entry : hashMap.entrySet()) {
      String tableInfo = context.getPrintableTableInfoFromId(entry.getKey().getTable());
      // computeIfAbsent replaces the previous containsKey/put/get sequence, and entry.getValue()
      // avoids the redundant hashMap.get(tabletId) lookup of a value this entry already holds
      result.computeIfAbsent(tableInfo, k -> new HashSet<>()).addAll(entry.getValue());
    }
    return result.toString();
  }

  /**
   * @return the internal list of constraint violations
   */
  public List<ConstraintViolationSummary> getConstraintViolationSummaries() {
    return cvsl;
  }

  /**
   * @return the internal mapping of TabletID to SecurityErrorCodes
   */
  public Map<TabletId,Set<SecurityErrorCode>> getSecurityErrorCodes() {
    return af;
  }

  /**
   * @return A list of servers that had internal errors when mutations were written
   */
  public Collection<String> getErrorServers() {
    return es;
  }

  /**
   * @return a count of unknown exceptions that occurred during processing
   */
  public int getUnknownExceptions() {
    return unknownErrors;
  }
}
| 9,967 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/AccumuloSecurityException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.commons.lang3.StringUtils;
/**
 * An Accumulo Exception for security violations, authentication failures, authorization failures,
 * etc.
 */
public class AccumuloSecurityException extends Exception {
  private static final long serialVersionUID = 1L;

  private static String getDefaultErrorMessage(final SecurityErrorCode errorcode) {
    switch (errorcode == null ? SecurityErrorCode.DEFAULT_SECURITY_ERROR : errorcode) {
      case BAD_CREDENTIALS:
        return "Username or Password is Invalid";
      case CONNECTION_ERROR:
        return "Connection Error Occurred";
      case PERMISSION_DENIED:
        return "User does not have permission to perform this action";
      case USER_DOESNT_EXIST:
        return "The user does not exist";
      case USER_EXISTS:
        return "The user exists";
      case GRANT_INVALID:
        return "The GRANT permission cannot be granted or revoked";
      case BAD_AUTHORIZATIONS:
        return "The user does not have the specified authorizations assigned";
      case UNSUPPORTED_OPERATION:
        return "The configured security handler does not support this operation";
      case INVALID_TOKEN:
        return "The configured authenticator does not accept this type of token";
      case AUTHENTICATOR_FAILED:
        return "The configured authenticator failed for some reason";
      case AUTHORIZOR_FAILED:
        return "The configured authorizor failed for some reason";
      case PERMISSIONHANDLER_FAILED:
        return "The configured permission handler failed for some reason";
      case TOKEN_EXPIRED:
        return "The supplied token expired, please update and try again";
      case INSUFFICIENT_PROPERTIES:
        return "The login properties supplied are not sufficient for authentication. "
            + "Please check the requested properties and try again";
      case DEFAULT_SECURITY_ERROR:
      default:
        return "Unknown security exception";
    }
  }

  // normalizes a possibly-null code to a concrete value; previously this ternary was
  // duplicated in every constructor
  private static SecurityErrorCode nonNullCode(final SecurityErrorCode errorcode) {
    return errorcode == null ? SecurityErrorCode.DEFAULT_SECURITY_ERROR : errorcode;
  }

  private String user;
  private String tableInfo;
  // never null; normalized at construction and has no setter, so it can be final
  private final SecurityErrorCode errorCode;

  /**
   * @return this exception as a thrift exception
   */
  public ThriftSecurityException asThriftException() {
    return new ThriftSecurityException(user, errorCode);
  }

  /**
   * Construct a user-facing exception from a serialized version.
   *
   * @param thrift a serialized version
   */
  public AccumuloSecurityException(final ThriftSecurityException thrift) {
    this(thrift.getUser(), thrift.getCode(), thrift);
  }

  /**
   * @param user the relevant user for the security violation
   * @param errorcode the specific reason for this exception
   * @param cause the exception that caused this violation
   */
  public AccumuloSecurityException(final String user, final SecurityErrorCode errorcode,
      final Throwable cause) {
    this(user, errorcode, null, cause);
  }

  /**
   * @param user the relevant user for the security violation
   * @param errorcode the specific reason for this exception
   * @param tableInfo the relevant tableInfo for the security violation
   * @param cause the exception that caused this violation
   */
  public AccumuloSecurityException(final String user, final SecurityErrorCode errorcode,
      final String tableInfo, final Throwable cause) {
    super(getDefaultErrorMessage(errorcode), cause);
    this.user = user;
    this.errorCode = nonNullCode(errorcode);
    this.tableInfo = tableInfo;
  }

  /**
   * @param user the relevant user for the security violation
   * @param errorcode the specific reason for this exception
   */
  public AccumuloSecurityException(final String user, final SecurityErrorCode errorcode) {
    this(user, errorcode, (String) null);
  }

  /**
   * @param user the relevant user for the security violation
   * @param errorcode the specific reason for this exception
   * @param tableInfo the relevant tableInfo for the security violation
   */
  public AccumuloSecurityException(final String user, final SecurityErrorCode errorcode,
      final String tableInfo) {
    super(getDefaultErrorMessage(errorcode));
    this.user = user;
    this.errorCode = nonNullCode(errorcode);
    this.tableInfo = tableInfo;
  }

  /**
   * @return the relevant user for the security violation
   */
  public String getUser() {
    return user;
  }

  public void setUser(String s) {
    this.user = s;
  }

  /**
   * @return the relevant tableInfo for the security violation
   */
  public String getTableInfo() {
    return tableInfo;
  }

  public void setTableInfo(String tableInfo) {
    this.tableInfo = tableInfo;
  }

  /**
   * @return the specific reason for this exception
   * @since 1.5.0
   */
  public org.apache.accumulo.core.client.security.SecurityErrorCode getSecurityErrorCode() {
    return org.apache.accumulo.core.client.security.SecurityErrorCode.valueOf(errorCode.name());
  }

  @Override
  public String getMessage() {
    StringBuilder message = new StringBuilder();
    message.append("Error ").append(errorCode);
    message.append(" for user ").append(user);
    if (!StringUtils.isEmpty(tableInfo)) {
      message.append(" on table ").append(tableInfo);
    }
    message.append(" - ").append(super.getMessage());
    return message.toString();
  }
}
| 9,968 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/TableDeletedException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
/**
 * This exception is thrown if a table is deleted after an operation starts.
 *
 * For example if table A exists when a scan is started, but is deleted during the scan then this
 * exception is thrown.
 */
public class TableDeletedException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  // set once at construction; making it final documents and enforces the immutability
  private final String tableId;

  /**
   * @param tableId the ID of the table that was deleted
   */
  public TableDeletedException(String tableId) {
    super("Table ID " + tableId + " was deleted");
    this.tableId = tableId;
  }

  /**
   * @param tableId the ID of the table that was deleted
   * @param cause the underlying exception that revealed the deletion
   * @since 2.0.0
   */
  public TableDeletedException(String tableId, Exception cause) {
    super("Table ID " + tableId + " was deleted", cause);
    this.tableId = tableId;
  }

  /**
   * @return the ID of the deleted table
   */
  public String getTableId() {
    return tableId;
  }
}
| 9,969 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/Durability.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
/**
 * The value for the durability of a BatchWriter or ConditionalWriter.
 *
 * @since 1.7.0
 */
public enum Durability {
  // Note, the order of these is important; the "highest" Durability is used in group commits.
  /**
   * Use the durability as specified by the table or system configuration.
   */
  DEFAULT,
  /**
   * Don't bother writing mutations to the write-ahead log.
   */
  NONE,
  /**
   * Write mutations to the write-ahead log. Data may be sitting in the server's output buffers,
   * and not replicated anywhere.
   */
  LOG,
  /**
   * Write mutations to the write-ahead log, and ensure the data is stored on remote servers, but
   * perhaps not on persistent storage.
   */
  FLUSH,
  /**
   * Write mutations to the write-ahead log, and ensure the data is saved to persistent storage.
   */
  SYNC
}
| 9,970 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/PluginEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.function.Function;
import java.util.function.Supplier;
import org.apache.accumulo.core.data.TableId;
/**
 * This interface exposes Accumulo system level information to plugins in a stable manner. The
 * purpose of this interface is to insulate plugins from internal refactorings and changes to
 * Accumulo.
 *
 * @since 2.1.0
 */
public interface PluginEnvironment {

  /**
   * A read-only view over Accumulo configuration properties.
   *
   * @since 2.1.0
   */
  interface Configuration extends Iterable<Entry<String,String>> {

    /**
     * Properties with a default value will always return something when calling
     * {@link #get(String)}, even if a user never set the property. The method allows checking if a
     * user set a property.
     *
     * @return true if a user set this property and false if a user did not set it.
     * @since 2.1.0
     */
    boolean isSet(String key);

    /**
     * @return The value for a single property or null if not present. Sensitive properties are
     *         intentionally not returned in order to prevent inadvertent logging of them. If your
     *         plugin needs sensitive properties a getSensitive method could be added.
     */
    String get(String key);

    /**
     * Returns all properties with a given prefix
     *
     * @param prefix prefix of properties to be returned. Include the trailing '.' in the prefix.
     * @return all properties with a given prefix
     * @since 2.1.0
     */
    Map<String,String> getWithPrefix(String prefix);

    /**
     * Users can set arbitrary custom properties in Accumulo using the prefix
     * {@code general.custom.}. This method will return all properties with that prefix, stripping
     * the prefix. For example, assume the following properties were set :
     *
     * <pre>
     * {@code
     *   general.custom.prop1=123
     *   general.custom.prop2=abc
     * }
     * </pre>
     *
     * Then this function would return a map containing {@code [prop1=123,prop2=abc]}.
     */
    Map<String,String> getCustom();

    /**
     * This method appends the prefix {@code general.custom} and gets the property.
     *
     * @return The same as calling {@code getCustom().get(keySuffix)} OR
     *         {@code get("general.custom."+keySuffix)}
     */
    String getCustom(String keySuffix);

    /**
     * Users can set arbitrary custom table properties in Accumulo using the prefix
     * {@code table.custom.}. This method will return all properties with that prefix, stripping the
     * prefix. For example, assume the following properties were set :
     *
     * <pre>
     * {@code
     *   table.custom.tp1=ch1
     *   table.custom.tp2=bh2
     * }
     * </pre>
     *
     * Then this function would return a map containing {@code [tp1=ch1,tp2=bh2]}.
     */
    Map<String,String> getTableCustom();

    /**
     * This method appends the prefix {@code table.custom} and gets the property.
     *
     * @return The same as calling {@code getTableCustom().get(keySuffix)} OR
     *         {@code get("table.custom."+keySuffix)}
     */
    String getTableCustom(String keySuffix);

    /**
     * Returns an iterator over all properties. This may be inefficient, consider opening an issue
     * if you have a use case that is only satisfied by this. Sensitive properties are intentionally
     * suppressed in order to prevent inadvertent logging of them.
     */
    @Override
    Iterator<Entry<String,String>> iterator();

    /**
     * Returns a derived value from this Configuration. The returned value supplier is thread-safe
     * and attempts to avoid re-computation of the response. The intended use for a derived value is
     * to ensure that configuration changes that may be made in Zookeeper, for example, are always
     * reflected in the returned value.
     */
    <T> Supplier<T> getDerived(Function<Configuration,T> computeDerivedValue);
  }

  /**
   * @return A view of Accumulo's system level configuration. This is backed by system level config
   *         in zookeeper, which falls back to site configuration, which falls back to the default
   *         configuration.
   */
  Configuration getConfiguration();

  /**
   * @return a view of a table's configuration. When requesting properties that start with
   *         {@code table.} the returned configuration may give different values for different
   *         tables. For other properties the returned configuration will return the same value as
   *         {@link #getConfiguration()}.
   */
  Configuration getConfiguration(TableId tableId);

  /**
   * Many Accumulo plugins are given table IDs as this is what Accumulo uses internally to identify
   * tables. If a plugin needs to log debugging information it can call this method to get the table
   * name.
   *
   * @throws TableNotFoundException if no table exists with the given ID
   */
  String getTableName(TableId tableId) throws TableNotFoundException;

  /**
   * Instantiate a class using Accumulo's system classloader. The class must have a no argument
   * constructor.
   *
   * @param className Fully qualified name of the class.
   * @param base The expected super type of the class.
   */
  <T> T instantiate(String className, Class<T> base) throws ReflectiveOperationException;

  /**
   * Instantiate a class using Accumulo's per table classloader. The class must have a no argument
   * constructor.
   *
   * @param className Fully qualified name of the class.
   * @param base The expected super type of the class.
   */
  <T> T instantiate(TableId tableId, String className, Class<T> base)
      throws ReflectiveOperationException;
}
| 9,971 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/Scanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.data.Range;
/**
* Scans a table over a given range.
*
* "Clients can iterate over multiple column families, and there are several mechanisms for limiting
* the rows, columns, and timestamps traversed by a scan. For example, we could restrict [a] scan
* ... to only produce anchors whose columns match [a] regular expression ..., or to only produce
* anchors whose timestamps fall within ten days of the current time."
*/
public interface Scanner extends ScannerBase {

  /**
   * Sets the range of keys to scan over. Only entries whose keys fall within this range are
   * returned by {@link #iterator()}.
   *
   * @param range key range to begin and end scan
   */
  void setRange(Range range);

  /**
   * Returns the range of keys to scan over.
   *
   * @return the range configured for this scanner
   */
  Range getRange();

  /**
   * Sets the number of Key/Value pairs that will be fetched at a time from a tablet server. Larger
   * batches reduce round trips at the cost of client-side memory.
   *
   * @param size the number of Key/Value pairs to fetch per call to Accumulo
   */
  void setBatchSize(int size);

  /**
   * Returns the batch size (number of Key/Value pairs) that will be fetched at a time from a tablet
   * server.
   *
   * @return the batch size configured for this scanner
   */
  int getBatchSize();

  /**
   * Enables row isolation. Writes that occur to a row after a scan of that row has begun will not
   * be seen if this option is enabled.
   */
  void enableIsolation();

  /**
   * Disables row isolation. Writes that occur to a row after a scan of that row has begun may be
   * seen if this option is enabled.
   */
  void disableIsolation();

  /**
   * The number of batches of Key/Value pairs returned before the {@link Scanner} will begin to
   * prefetch the next batch
   *
   * @return Number of batches before read-ahead begins
   * @since 1.6.0
   */
  long getReadaheadThreshold();

  /**
   * Sets the number of batches of Key/Value pairs returned before the {@link Scanner} will begin to
   * prefetch the next batch
   *
   * @param batches Non-negative number of batches
   * @throws IllegalArgumentException if {@code batches} is negative (implementations such as
   *         {@code IsolatedScanner} enforce this)
   * @since 1.6.0
   */
  void setReadaheadThreshold(long batches);
}
| 9,972 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/IteratorSetting.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import static com.google.common.base.Preconditions.checkArgument;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
* Configure an iterator for minc, majc, and/or scan. By default, IteratorSetting will be configured
* for scan.
*
* Every iterator has a priority, a name, a class, a set of scopes, and configuration parameters.
*
* A typical use case configured for scan:
*
* <pre>
* IteratorSetting cfg = new IteratorSetting(priority, "myIter", MyIterator.class);
* MyIterator.addOption(cfg, 42);
* scanner.addScanIterator(cfg);
* </pre>
*/
public class IteratorSetting implements Writable {

  private int priority;
  private String name;
  private String iteratorClass;
  private Map<String,String> properties;

  /**
   * Get layer at which this iterator applies. See {@link #setPriority(int)} for how the priority is
   * used.
   *
   * @return the priority of this Iterator
   */
  public int getPriority() {
    return priority;
  }

  /**
   * Set layer at which this iterator applies.
   *
   * @param priority determines the order in which iterators are applied (system iterators are
   *        always applied first, then user-configured iterators, lowest priority first)
   * @throws IllegalArgumentException if {@code priority} is not strictly positive
   */
  public void setPriority(int priority) {
    // Message previously (and incorrectly) referred to "property" instead of "priority".
    checkArgument(priority > 0, "priority must be strictly positive");
    this.priority = priority;
  }

  /**
   * Get the iterator's name.
   *
   * @return the name of the iterator
   */
  public String getName() {
    return name;
  }

  /**
   * Set the iterator's name. Must be a simple alphanumeric identifier. The iterator name also may
   * not contain a dot/period.
   *
   * @throws IllegalArgumentException if {@code name} is null or contains a dot/period
   */
  public void setName(String name) {
    checkArgument(name != null, "name is null");
    checkArgument(!name.contains("."), "Iterator name cannot contain a dot/period: " + name);
    this.name = name;
  }

  /**
   * Get the name of the class that implements the iterator.
   *
   * @return the iterator's class name
   */
  public String getIteratorClass() {
    return iteratorClass;
  }

  /**
   * Set the name of the class that implements the iterator. The class does not have to be present
   * on the client, but it must be available to all tablet servers.
   *
   * @throws IllegalArgumentException if {@code iteratorClass} is null
   */
  public void setIteratorClass(String iteratorClass) {
    checkArgument(iteratorClass != null, "iteratorClass is null");
    this.iteratorClass = iteratorClass;
  }

  /**
   * Constructs an iterator setting configured for the scan scope with no parameters. (Parameters
   * can be added later.)
   *
   * @param priority the priority for the iterator (see {@link #setPriority(int)})
   * @param name the distinguishing name for the iterator
   * @param iteratorClass the fully qualified class name for the iterator
   */
  public IteratorSetting(int priority, String name, String iteratorClass) {
    this(priority, name, iteratorClass, new HashMap<>());
  }

  /**
   * Constructs an iterator setting configured for the specified scopes with the specified
   * parameters.
   *
   * @param priority the priority for the iterator (see {@link #setPriority(int)})
   * @param name the distinguishing name for the iterator
   * @param iteratorClass the fully qualified class name for the iterator
   * @param properties any properties for the iterator; copied, so later changes to the argument do
   *        not affect this setting
   */
  public IteratorSetting(int priority, String name, String iteratorClass,
      Map<String,String> properties) {
    setPriority(priority);
    setName(name);
    setIteratorClass(iteratorClass);
    this.properties = new HashMap<>();
    addOptions(properties);
  }

  /**
   * Constructs an iterator setting using the given class's SimpleName for the iterator name. The
   * iterator setting will be configured for the scan scope with no parameters.
   *
   * @param priority the priority for the iterator (see {@link #setPriority(int)})
   * @param iteratorClass the class for the iterator
   */
  public IteratorSetting(int priority,
      Class<? extends SortedKeyValueIterator<Key,Value>> iteratorClass) {
    this(priority, iteratorClass.getSimpleName(), iteratorClass.getName());
  }

  /**
   * Constructs an iterator setting using the given class's SimpleName for the iterator name and
   * configured for the specified scopes with the specified parameters.
   *
   * @param priority the priority for the iterator (see {@link #setPriority(int)})
   * @param iteratorClass the class for the iterator
   * @param properties any properties for the iterator
   */
  public IteratorSetting(int priority,
      Class<? extends SortedKeyValueIterator<Key,Value>> iteratorClass,
      Map<String,String> properties) {
    this(priority, iteratorClass.getSimpleName(), iteratorClass.getName(), properties);
  }

  /**
   * Constructs an iterator setting configured for the scan scope with no parameters.
   *
   * @param priority the priority for the iterator (see {@link #setPriority(int)})
   * @param name the distinguishing name for the iterator
   * @param iteratorClass the class for the iterator
   */
  public IteratorSetting(int priority, String name,
      Class<? extends SortedKeyValueIterator<Key,Value>> iteratorClass) {
    this(priority, name, iteratorClass.getName());
  }

  /**
   * Constructs an iterator setting using the provided name and the provided class's name for the
   * scan scope with the provided parameters.
   *
   * @param priority The priority for the iterator (see {@link #setPriority(int)})
   * @param name The distinguishing name for the iterator
   * @param iteratorClass The class for the iterator
   * @param properties Any properties for the iterator
   *
   * @since 1.6.0
   */
  public IteratorSetting(int priority, String name,
      Class<? extends SortedKeyValueIterator<Key,Value>> iteratorClass,
      Map<String,String> properties) {
    this(priority, name, iteratorClass.getName(), properties);
  }

  /**
   * Constructs an iterator setting by deserializing from the given input, in the format produced
   * by {@link #write(DataOutput)}.
   *
   * @since 1.5.0
   */
  public IteratorSetting(DataInput din) throws IOException {
    this.properties = new HashMap<>();
    this.readFields(din);
  }

  /**
   * Add another option to the iterator.
   *
   * @param option the name of the option
   * @param value the value of the option
   * @throws IllegalArgumentException if either argument is null
   */
  public void addOption(String option, String value) {
    checkArgument(option != null, "option is null");
    checkArgument(value != null, "value is null");
    properties.put(option, value);
  }

  /**
   * Remove an option from the iterator.
   *
   * @param option the name of the option
   * @return the value previously associated with the option, or null if no such option existed
   */
  public String removeOption(String option) {
    checkArgument(option != null, "option is null");
    return properties.remove(option);
  }

  /**
   * Add many options to the iterator.
   *
   * @param propertyEntries a set of entries to add to the options
   */
  public void addOptions(Set<Entry<String,String>> propertyEntries) {
    checkArgument(propertyEntries != null, "propertyEntries is null");
    for (Entry<String,String> keyValue : propertyEntries) {
      addOption(keyValue.getKey(), keyValue.getValue());
    }
  }

  /**
   * Add many options to the iterator.
   *
   * @param properties a map of entries to add to the options
   */
  public void addOptions(Map<String,String> properties) {
    checkArgument(properties != null, "properties is null");
    addOptions(properties.entrySet());
  }

  /**
   * Get the configuration parameters for this iterator.
   *
   * @return an unmodifiable view of the properties
   */
  public Map<String,String> getOptions() {
    return Collections.unmodifiableMap(properties);
  }

  /**
   * Remove all options from the iterator.
   */
  public void clearOptions() {
    properties.clear();
  }

  @Override
  public int hashCode() {
    // Produces the same value as the previous hand-rolled 31-based accumulation:
    // same fields, same order, same null handling (and Integer.hashCode(priority) == priority).
    return Objects.hash(iteratorClass, name, priority, properties);
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(obj instanceof IteratorSetting)) {
      return false;
    }
    IteratorSetting other = (IteratorSetting) obj;
    return priority == other.priority && Objects.equals(name, other.name)
        && Objects.equals(iteratorClass, other.iteratorClass)
        && Objects.equals(properties, other.properties);
  }

  @Override
  public String toString() {
    return "name:" + name + ", priority:" + priority + ", class:" + iteratorClass + ", properties:"
        + properties;
  }

  /**
   * A convenience class for passing column family and column qualifiers to iterator configuration
   * methods.
   */
  public static class Column extends Pair<Text,Text> {

    public Column(Text columnFamily, Text columnQualifier) {
      super(columnFamily, columnQualifier);
    }

    public Column(Text columnFamily) {
      super(columnFamily, null);
    }

    public Column(String columnFamily, String columnQualifier) {
      super(new Text(columnFamily), new Text(columnQualifier));
    }

    public Column(String columnFamily) {
      super(new Text(columnFamily), null);
    }

    public Text getColumnFamily() {
      return getFirst();
    }

    public Text getColumnQualifier() {
      return getSecond();
    }
  }

  /**
   * Reads priority, name, class, and then a counted list of option key/value pairs, mirroring
   * {@link #write(DataOutput)}.
   *
   * @since 1.5.0
   * @see Writable
   */
  @Override
  public void readFields(DataInput din) throws IOException {
    priority = WritableUtils.readVInt(din);
    name = WritableUtils.readString(din);
    iteratorClass = WritableUtils.readString(din);
    properties.clear();
    int size = WritableUtils.readVInt(din);
    for (int i = 0; i < size; i++) {
      properties.put(WritableUtils.readString(din), WritableUtils.readString(din));
    }
  }

  /**
   * Writes priority, name, class, and then a counted list of option key/value pairs.
   *
   * @since 1.5.0
   * @see Writable
   */
  @Override
  public void write(DataOutput dout) throws IOException {
    WritableUtils.writeVInt(dout, priority);
    WritableUtils.writeString(dout, name);
    WritableUtils.writeString(dout, iteratorClass);
    WritableUtils.writeVInt(dout, properties.size());
    for (Entry<String,String> e : properties.entrySet()) {
      WritableUtils.writeString(dout, e.getKey());
      WritableUtils.writeString(dout, e.getValue());
    }
  }
}
| 9,973 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/Accumulo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Properties;
import org.apache.accumulo.core.client.lexicoder.Lexicoder;
import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.clientImpl.ClientContext;
/**
* This class contains all API entry points created in 2.0.0 or later. The majority of the API is
* accessible indirectly via methods in this class. Below are a list of APIs entry points that are
* not accessible from here.
*
* <ul>
* <li>Hadoop input, output formats and partitioners in {@code org.apache.accumulo.hadoop.mapred}
* and {@code org.apache.accumulo.hadoop.mapreduce} packages.
* <li>{@code org.apache.accumulo.minicluster.MiniAccumuloCluster} Not linkable by javadoc, because
* in a separate module.
* <li>{@link Lexicoder} and all of its implementations in the same package.
* <li>{@link RFile}
* </ul>
*
* @see <a href="https://accumulo.apache.org/">Accumulo Website</a>
* @see <a href="https://accumulo.apache.org/api">Accumulo Public API</a>
* @see <a href="https://semver.org/spec/v2.0.0">Semver 2.0</a>
* @since 2.0.0
*/
public final class Accumulo {

  // Static entry-point holder; never instantiated.
  private Accumulo() {}

  /**
   * Fluent entry point for creating an {@link AccumuloClient}. For example:
   *
   * <pre>
   * <code>
   * // Create client directly from connection information
   * try (AccumuloClient client = Accumulo.newClient()
   *        .to(instanceName, zookeepers)
   *        .as(user, password).build())
   * {
   *   // use the client
   * }
   *
   * // Create client using the instance name, zookeeper, and credentials from java properties or properties file
   * try (AccumuloClient client = Accumulo.newClient()
   *        .from(properties).build())
   * {
   *   // use the client
   * }
   * </code>
   * </pre>
   *
   * For a list of all client properties, see the documentation on the
   * <a href="https://accumulo.apache.org/docs/2.x/configuration/client-properties">Accumulo
   * website</a>
   *
   * @return a builder object for Accumulo clients
   */
  public static AccumuloClient.PropertyOptions<AccumuloClient> newClient() {
    // The builder is parameterized on what build() produces: here, a live client.
    ClientContext.ClientBuilderImpl<AccumuloClient> builder =
        new ClientContext.ClientBuilderImpl<>(ClientContext.ClientBuilderImpl::buildClient);
    return builder;
  }

  /**
   * Fluent entry point for creating client {@link Properties}. For example:
   *
   * <pre>
   * <code>
   * Properties clientProperties = Accumulo.newClientProperties()
   *        .to(instanceName, zookeepers)
   *        .as(user, password).build())
   * </code>
   * </pre>
   *
   * For a list of all client properties, see the documentation on the
   * <a href="https://accumulo.apache.org/docs/2.x/configuration/client-properties">Accumulo
   * website</a>
   *
   * @return a builder object for client Properties
   */
  public static AccumuloClient.PropertyOptions<Properties> newClientProperties() {
    // Same builder type, but build() yields the Properties instead of a connected client.
    ClientContext.ClientBuilderImpl<Properties> builder =
        new ClientContext.ClientBuilderImpl<>(ClientContext.ClientBuilderImpl::buildProps);
    return builder;
  }
}
| 9,974 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Iterator;
import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.data.ConditionalMutation;
/**
* ConditionalWriter provides the ability to do efficient, atomic read-modify-write operations on
* rows. These operations are performed on the tablet server while a row lock is held.
*
* @since 1.6.0
*/
public interface ConditionalWriter extends AutoCloseable {

  /**
   * The outcome of one conditional mutation: either a {@link Status} from the server, or an
   * exception captured while processing the mutation.
   */
  class Result {

    // Exactly one of status/exception is set, depending on which constructor was used.
    private Status status;
    private ConditionalMutation mutation;
    private String server;
    private Exception exception;

    public Result(Status s, ConditionalMutation m, String server) {
      this.status = s;
      this.mutation = m;
      this.server = server;
    }

    public Result(Exception e, ConditionalMutation cm, String server) {
      this.exception = e;
      this.mutation = cm;
      this.server = server;
    }

    /**
     * If this method throws an exception, then its possible the mutation is still being actively
     * processed. Therefore if code chooses to continue after seeing an exception it should take
     * this into consideration.
     *
     * @return status of a conditional mutation
     */
    public Status getStatus() throws AccumuloException, AccumuloSecurityException {
      if (status == null) {
        // NOTE(review): an AccumuloException cause is wrapped in a new AccumuloException rather
        // than rethrown directly; callers see the original only via getCause(). Confirm whether
        // this wrapping is intentional before changing it.
        if (exception instanceof AccumuloException) {
          throw new AccumuloException(exception);
        }
        if (exception instanceof AccumuloSecurityException) {
          // Re-create the security exception so the thrift error code enum is translated into
          // the client-facing SecurityErrorCode type, preserving user/table context and cause.
          AccumuloSecurityException ase = (AccumuloSecurityException) exception;
          throw new AccumuloSecurityException(ase.getUser(),
              SecurityErrorCode.valueOf(ase.getSecurityErrorCode().name()), ase.getTableInfo(),
              ase);
        } else {
          // Any other exception type (or a null exception) is wrapped as an AccumuloException.
          throw new AccumuloException(exception);
        }
      }
      return status;
    }

    /**
     *
     * @return A copy of the mutation previously submitted by a user. The mutation will reference
     *         the same data, but the object may be different.
     */
    public ConditionalMutation getMutation() {
      return mutation;
    }

    /**
     *
     * @return The server this mutation was sent to. Returns null if was not sent to a server.
     */
    public String getTabletServer() {
      return server;
    }
  }

  enum Status {
    /**
     * conditions were met and mutation was written
     */
    ACCEPTED,
    /**
     * conditions were not met and mutation was not written
     */
    REJECTED,
    /**
     * mutation violated a constraint and was not written
     */
    VIOLATED,
    /**
     * error occurred after mutation was sent to server, its unknown if the mutation was written.
     * Although the status of the mutation is unknown, Accumulo guarantees the mutation will not be
     * written at a later point in time.
     */
    UNKNOWN,
    /**
     * A condition contained a column visibility that could never be seen
     */
    INVISIBLE_VISIBILITY,
  }

  /**
   * This method returns one result for each mutation passed to it. This method is thread safe.
   * Multiple threads can safely use a single conditional writer. Sharing a conditional writer
   * between multiple threads may result in batching of request to tablet servers.
   *
   * @return Result for each mutation submitted. The mutations may still be processing in the
   *         background when this method returns, if so the iterator will block.
   */
  Iterator<Result> write(Iterator<ConditionalMutation> mutations);

  /**
   * This method has the same thread safety guarantees as {@link #write(Iterator)}
   *
   * @return Result for the submitted mutation
   */
  Result write(ConditionalMutation mutation);

  /**
   * release any resources (like threads pools) used by conditional writer
   */
  @Override
  void close();
}
| 9,975 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/IsolatedScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.clientImpl.IsolationException;
import org.apache.accumulo.core.clientImpl.ScannerOptions;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
/**
* A scanner that presents a row isolated view of an accumulo table. Rows are buffered in memory on
* the client side. If you think your rows may not fit into memory, then you can provide an
* alternative row buffer factory to the constructor. This would allow rows to be buffered to disk
* for example.
*/
public class IsolatedScanner extends ScannerOptions implements Scanner {

  /**
   * Buffers one row at a time from the underlying isolated scanner. When the tablet server throws
   * an {@link IsolationException} mid-row, the partial row is discarded and the scan is re-seeked
   * past the last fully-read row, so callers only ever observe complete rows.
   */
  private static class RowBufferingIterator implements Iterator<Entry<Key,Value>> {

    private Iterator<Entry<Key,Value>> source;
    private RowBuffer buffer;
    // First entry of the next row, read ahead while detecting the current row's end.
    private Entry<Key,Value> nextRowStart;
    private Iterator<Entry<Key,Value>> rowIter;
    // Row most recently buffered in full; restart point after an IsolationException.
    private ByteSequence lastRow = null;
    private long timeout;
    private final Scanner scanner;
    private ScannerOptions opts;
    private Range range;
    private int batchSize;
    private long readaheadThreshold;

    // Reads the next complete row into the buffer, retrying from after the last complete
    // row whenever isolation is violated. Statement order here is significant: the buffer
    // must be cleared before each attempt, and lastRow is only advanced once a row has
    // been buffered in full.
    private void readRow() {

      ByteSequence row = null;

      while (true) {
        buffer.clear();

        try {
          if (nextRowStart != null) {
            // Start the row with the entry read ahead on the previous call.
            buffer.add(nextRowStart);
            row = nextRowStart.getKey().getRowData();
            nextRowStart = null;
          } else if (source.hasNext()) {
            Entry<Key,Value> entry = source.next();
            buffer.add(entry);
            row = entry.getKey().getRowData();
          }

          while (source.hasNext()) {
            Entry<Key,Value> entry = source.next();

            if (entry.getKey().getRowData().equals(row)) {
              buffer.add(entry);
            } else {
              // First entry of the following row; hold it for the next readRow() call.
              nextRowStart = entry;
              break;
            }
          }

          lastRow = row;
          rowIter = buffer.iterator();
          return;
        } catch (IsolationException ie) {
          Range seekRange = null;

          nextRowStart = null;

          if (lastRow == null) {
            // Nothing completed yet; retry the original range from the beginning.
            seekRange = range;
          } else {
            // Resume strictly after the last complete row.
            Text lastRowText = new Text();
            lastRowText.set(lastRow.getBackingArray(), lastRow.offset(), lastRow.length());
            Key startKey = new Key(lastRowText).followingKey(PartialKey.ROW);
            if (!range.afterEndKey(startKey)) {
              seekRange = new Range(startKey, true, range.getEndKey(), range.isEndKeyInclusive());
            }
          }

          if (seekRange == null) {
            // The failed row was the last one in the range; present an empty row and stop.
            buffer.clear();
            rowIter = buffer.iterator();
            return;
          }

          // wait a moment before retrying
          sleepUninterruptibly(100, MILLISECONDS);

          source = newIterator(seekRange);
        }
      }
    }

    // Re-creates the underlying isolated scanner iterator over the given range, copying the
    // current scanner options. Synchronized on the scanner since it is shared across retries.
    private Iterator<Entry<Key,Value>> newIterator(Range r) {
      synchronized (scanner) {
        scanner.enableIsolation();
        scanner.setBatchSize(batchSize);
        scanner.setTimeout(timeout, MILLISECONDS);
        scanner.setRange(r);
        scanner.setReadaheadThreshold(readaheadThreshold);
        setOptions((ScannerOptions) scanner, opts);

        return scanner.iterator();
      }
    }

    public RowBufferingIterator(Scanner scanner, ScannerOptions opts, Range range, long timeout,
        int batchSize, long readaheadThreshold, RowBufferFactory bufferFactory) {
      this.scanner = scanner;
      this.opts = new ScannerOptions(opts);
      this.range = range;
      this.timeout = timeout;
      this.batchSize = batchSize;
      this.readaheadThreshold = readaheadThreshold;

      buffer = bufferFactory.newBuffer();

      this.source = newIterator(range);
      // Eagerly buffer the first row so hasNext() is accurate immediately.
      readRow();
    }

    @Override
    public boolean hasNext() {
      return rowIter.hasNext();
    }

    @Override
    public Entry<Key,Value> next() {
      Entry<Key,Value> next = rowIter.next();
      if (!rowIter.hasNext()) {
        // Current row exhausted; buffer the next one now.
        readRow();
      }

      return next;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  }

  /** Creates {@link RowBuffer} instances; supply your own to buffer rows off-heap or on disk. */
  public interface RowBufferFactory {
    RowBuffer newBuffer();
  }

  /** Holds the entries of a single row between isolation retries. */
  public interface RowBuffer extends Iterable<Entry<Key,Value>> {
    void add(Entry<Key,Value> entry);

    @Override
    Iterator<Entry<Key,Value>> iterator();

    void clear();
  }

  /** Default factory: buffers each row in client memory. */
  public static class MemoryRowBufferFactory implements RowBufferFactory {

    @Override
    public RowBuffer newBuffer() {
      return new MemoryRowBuffer();
    }
  }

  /** In-memory {@link RowBuffer} backed by an ArrayList. */
  public static class MemoryRowBuffer implements RowBuffer {

    private ArrayList<Entry<Key,Value>> buffer = new ArrayList<>();

    @Override
    public void add(Entry<Key,Value> entry) {
      buffer.add(entry);
    }

    @Override
    public Iterator<Entry<Key,Value>> iterator() {
      return buffer.iterator();
    }

    @Override
    public void clear() {
      buffer.clear();
    }
  }

  private Scanner scanner;
  private Range range;
  private int batchSize;
  private long readaheadThreshold;
  private RowBufferFactory bufferFactory;

  public IsolatedScanner(Scanner scanner) {
    this(scanner, new MemoryRowBufferFactory());
  }

  public IsolatedScanner(Scanner scanner, RowBufferFactory bufferFactory) {
    this.scanner = scanner;
    this.range = scanner.getRange();
    // retryTimeout and batchTimeout are presumably inherited from ScannerOptions
    // (not declared in this class) — confirm against the superclass.
    this.retryTimeout = scanner.getTimeout(MILLISECONDS);
    this.batchTimeout = scanner.getBatchTimeout(MILLISECONDS);
    this.batchSize = scanner.getBatchSize();
    this.readaheadThreshold = scanner.getReadaheadThreshold();
    this.bufferFactory = bufferFactory;
  }

  @Override
  public Iterator<Entry<Key,Value>> iterator() {
    return new RowBufferingIterator(scanner, this, range, retryTimeout, batchSize,
        readaheadThreshold, bufferFactory);
  }

  @Override
  public void setRange(Range range) {
    this.range = range;
  }

  @Override
  public Range getRange() {
    return range;
  }

  @Override
  public void setBatchSize(int size) {
    this.batchSize = size;
  }

  @Override
  public int getBatchSize() {
    return batchSize;
  }

  @Override
  public void enableIsolation() {
    // aye aye captain, already done sir
  }

  @Override
  public void disableIsolation() {
    // Isolation is the whole point of this wrapper, so it cannot be turned off.
    throw new UnsupportedOperationException();
  }

  @Override
  public long getReadaheadThreshold() {
    return readaheadThreshold;
  }

  @Override
  public void setReadaheadThreshold(long batches) {
    if (batches < 0) {
      throw new IllegalArgumentException(
          "Number of batches before read-ahead must be non-negative");
    }

    this.readaheadThreshold = batches;
  }

  @Override
  public void close() {
    scanner.close();
  }
}
| 9,976 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/RowIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.util.PeekingIterator;
import org.apache.hadoop.io.Text;
/**
* Group Key/Value pairs into Iterators over rows. Suggested usage:
*
* <pre>
* RowIterator rowIterator = new RowIterator(client.createScanner(tableName, authorizations));
* </pre>
*/
public class RowIterator implements Iterator<Iterator<Entry<Key,Value>>> {

  /**
   * Presents the entries of a single row. Becomes invalid once the parent RowIterator
   * moves on to the next row.
   */
  private static class SingleRowIter implements Iterator<Entry<Key,Value>> {

    private PeekingIterator<Entry<Key,Value>> feed;
    // Row currently being emitted; null once the row is exhausted.
    private Text row = null;
    private long emitted = 0;
    private boolean invalid = false;

    /**
     * Requires a PeekingIterator so the row boundary can be detected by looking at the
     * next entry without consuming it.
     */
    public SingleRowIter(PeekingIterator<Entry<Key,Value>> feed) {
      this.feed = feed;
      if (feed.hasNext()) {
        row = feed.peek().getKey().getRow();
      }
    }

    @Override
    public boolean hasNext() {
      if (invalid) {
        throw new IllegalStateException("SingleRowIter no longer valid");
      }
      return row != null;
    }

    @Override
    public Entry<Key,Value> next() {
      if (invalid) {
        throw new IllegalStateException("SingleRowIter no longer valid");
      }
      return advance();
    }

    // Emits the next entry and clears `row` when the row boundary is reached.
    // Deliberately skips the `invalid` check so consume() can drain the row.
    private Entry<Key,Value> advance() {
      if (row == null) {
        throw new NoSuchElementException();
      }
      emitted++;
      Entry<Key,Value> result = feed.next();
      boolean rowEnded = !feed.hasNext() || !feed.peek().getKey().getRow().equals(row);
      if (rowEnded) {
        row = null;
      }
      return result;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }

    /**
     * Number of entries read from the row so far (equals the row size only once the row
     * has been read fully).
     */
    public long getCount() {
      return emitted;
    }

    /**
     * Drains the remainder of the row and permanently invalidates this iterator.
     */
    public void consume() {
      invalid = true;
      while (row != null) {
        advance();
      }
    }
  }

  private final PeekingIterator<Entry<Key,Value>> iter;
  private long count = 0;
  private SingleRowIter lastIter = null;

  /**
   * Create an iterator from an (ordered) sequence of KeyValue pairs.
   */
  public RowIterator(Iterator<Entry<Key,Value>> iterator) {
    this.iter = new PeekingIterator<>(iterator);
  }

  /**
   * Create an iterator from an Iterable.
   */
  public RowIterator(Iterable<Entry<Key,Value>> iterable) {
    this(iterable.iterator());
  }

  /**
   * Returns true if there is at least one more row to get.
   *
   * If the last row hasn't been fully read, this method will read through the end of the last row
   * so it can determine if the underlying iterator has a next row. The last row is disabled from
   * future use.
   */
  @Override
  public boolean hasNext() {
    SingleRowIter previous = lastIter;
    if (previous != null) {
      lastIter = null;
      previous.consume();
      count += previous.getCount();
    }
    return iter.hasNext();
  }

  /**
   * Fetch the next row.
   */
  @Override
  public Iterator<Entry<Key,Value>> next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    lastIter = new SingleRowIter(iter);
    return lastIter;
  }

  /**
   * Unsupported.
   */
  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  /**
   * Get a count of the total number of entries in all rows read so far.
   */
  public long getKVCount() {
    return count;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Collection;
import org.apache.accumulo.core.data.Range;
/**
* Implementations of BatchDeleter support efficient deletion of ranges in accumulo.
*/
public interface BatchDeleter extends ScannerBase {
  /**
   * Deletes the ranges specified by {@link #setRanges}.
   *
   * @throws MutationsRejectedException this can be thrown when deletion mutations fail
   * @throws TableNotFoundException when the table does not exist
   */
  void delete() throws MutationsRejectedException, TableNotFoundException;
  /**
   * Allows deleting multiple ranges efficiently.
   *
   * @param ranges specifies the non-overlapping ranges to query
   */
  void setRanges(Collection<Range> ranges);
  /**
   * Releases any resources held by this deleter; see {@link ScannerBase#close()}.
   */
  @Override
  void close();
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
/**
* Thrown when the table specified already exists, and it was expected that it didn't
*/
public class TableExistsException extends Exception {
  private static final long serialVersionUID = 1L;

  /**
   * Builds a message of the form {@code Table <name> (Id=<id>) exists (<description>)}, omitting
   * any component that is null or empty.
   */
  private static String buildMessage(String tableId, String tableName, String description) {
    StringBuilder msg = new StringBuilder("Table");
    if (tableName != null && !tableName.isEmpty()) {
      msg.append(' ').append(tableName);
    }
    if (tableId != null && !tableId.isEmpty()) {
      msg.append(" (Id=").append(tableId).append(')');
    }
    msg.append(" exists");
    if (description != null && !description.isEmpty()) {
      msg.append(" (").append(description).append(')');
    }
    return msg.toString();
  }

  /**
   * @param tableId the internal id of the table that exists
   * @param tableName the visible name of the table that exists
   * @param description the specific reason why it failed
   */
  public TableExistsException(String tableId, String tableName, String description) {
    super(buildMessage(tableId, tableName, description));
  }

  /**
   * @param tableId the internal id of the table that exists
   * @param tableName the visible name of the table that exists
   * @param description the specific reason why it failed
   * @param cause the exception that caused this failure
   */
  public TableExistsException(String tableId, String tableName, String description,
      Throwable cause) {
    this(tableId, tableName, description);
    // legal because no cause was supplied to the superclass constructor above
    initCause(cause);
  }

  /**
   * @param e constructs an exception from a thrift exception
   */
  public TableExistsException(ThriftTableOperationException e) {
    this(e.getTableId(), e.getTableName(), e.getDescription(), e);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.apache.accumulo.core.client.IteratorSetting.Column;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.spi.scan.HintScanPrioritizer;
import org.apache.accumulo.core.spi.scan.ScanDispatcher;
import org.apache.accumulo.core.spi.scan.ScanInfo;
import org.apache.accumulo.core.spi.scan.ScanPrioritizer;
import org.apache.accumulo.core.spi.scan.SimpleScanDispatcher;
import org.apache.hadoop.io.Text;
/**
* This class hosts configuration methods that are shared between different types of scanners.
*/
public interface ScannerBase extends Iterable<Entry<Key,Value>>, AutoCloseable {
  /**
   * Consistency level for the scanner. The default level is IMMEDIATE, which means that this
   * scanner will see keys and values that have been successfully written to a TabletServer.
   * EVENTUAL means that the scanner may not see the latest data that was written to a TabletServer,
   * but may instead see an older version of data.
   *
   * @since 2.1.0
   */
  enum ConsistencyLevel {
    // IMMEDIATE: scans reflect all data successfully written to the tablet servers (the default).
    // EVENTUAL: scans may return an older version of the data, per the enum javadoc above.
    IMMEDIATE, EVENTUAL
  }
  /**
   * Add a server-side scan iterator.
   *
   * @param cfg fully specified scan-time iterator, including all options for the iterator. Any
   *        changes to the iterator setting after this call are not propagated to the stored
   *        iterator.
   * @throws IllegalArgumentException if the setting conflicts with existing iterators
   */
  void addScanIterator(IteratorSetting cfg);
  /**
   * Remove an iterator from the list of iterators.
   *
   * @param iteratorName nickname used for the iterator
   */
  void removeScanIterator(String iteratorName);
  /**
   * Update the options for an iterator. Note that this does <b>not</b> change the iterator options
   * during a scan, it just replaces the given option on a configured iterator before a scan is
   * started.
   *
   * @param iteratorName the name of the iterator to change
   * @param key the name of the option
   * @param value the new value for the named option
   */
  void updateScanIteratorOption(String iteratorName, String key, String value);
  /**
   * Adds a column family to the list of columns that will be fetched by this scanner. By default
   * when no columns have been added the scanner fetches all columns. To fetch multiple column
   * families call this function multiple times.
   *
   * <p>
   * This can help limit which locality groups are read on the server side.
   *
   * <p>
   * When used in conjunction with custom iterators, the set of column families fetched is passed to
   * the top iterator's seek method. Custom iterators may change this set of column families when
   * calling seek on their source.
   *
   * @param col the column family to be fetched
   */
  void fetchColumnFamily(Text col);
  /**
   * Adds a column family to the list of columns that will be fetched by this scanner. By default
   * when no columns have been added the scanner fetches all columns. To fetch multiple column
   * families call this function multiple times.
   *
   * <p>
   * This can help limit which locality groups are read on the server side.
   *
   * <p>
   * When used in conjunction with custom iterators, the set of column families fetched is passed to
   * the top iterator's seek method. Custom iterators may change this set of column families when
   * calling seek on their source.
   *
   * @param colFam the column family to be fetched
   * @since 2.0.0
   */
  default void fetchColumnFamily(CharSequence colFam) {
    Objects.requireNonNull(colFam);
    fetchColumnFamily(new Text(colFam.toString()));
  }
  /**
   * Adds a column to the list of columns that will be fetched by this scanner. The column is
   * identified by family and qualifier. By default when no columns have been added the scanner
   * fetches all columns.
   *
   * <p>
   * <b>WARNING</b>. Using this method with custom iterators may have unexpected results. Iterators
   * have control over which column families are fetched. However iterators have no control over
   * which column qualifiers are fetched. When this method is called it activates a system iterator
   * that only allows the requested family/qualifier pairs through. This low level filtering
   * prevents custom iterators from requesting additional column families when calling seek.
   *
   * <p>
   * For an example, assume fetchColumns(A, Q1) and fetchColumns(B,Q1) is called on a scanner and a
   * custom iterator is configured. The families (A,B) will be passed to the seek method of the
   * custom iterator. If the custom iterator seeks its source iterator using the families (A,B,C),
   * it will never see any data from C because the system iterator filtering A:Q1 and B:Q1 will
   * prevent the C family from getting through. ACCUMULO-3905 also has an example of the type of
   * problem this method can cause.
   *
   * <p>
   * tl;dr If using a custom iterator with a seek method that adds column families, then may want to
   * avoid using this method.
   *
   * @param colFam the column family of the column to be fetched
   * @param colQual the column qualifier of the column to be fetched
   */
  void fetchColumn(Text colFam, Text colQual);
  /**
   * Adds a column to the list of columns that will be fetched by this scanner. The column is
   * identified by family and qualifier. By default when no columns have been added the scanner
   * fetches all columns. See the warning on {@link #fetchColumn(Text, Text)}
   *
   *
   * @param colFam the column family of the column to be fetched
   * @param colQual the column qualifier of the column to be fetched
   * @since 2.0.0
   */
  default void fetchColumn(CharSequence colFam, CharSequence colQual) {
    Objects.requireNonNull(colFam);
    Objects.requireNonNull(colQual);
    fetchColumn(new Text(colFam.toString()), new Text(colQual.toString()));
  }
  /**
   * Adds a column to the list of columns that will be fetch by this scanner.
   *
   * @param column the {@link Column} to fetch
   * @since 1.7.0
   */
  void fetchColumn(Column column);
  /**
   * Clears the columns to be fetched (useful for resetting the scanner for reuse). Once cleared,
   * the scanner will fetch all columns.
   */
  void clearColumns();
  /**
   * Clears scan iterators prior to returning a scanner to the pool.
   */
  void clearScanIterators();
  /**
   * Returns an iterator over an accumulo table. This iterator uses the options that are currently
   * set for its lifetime, so setting options will have no effect on existing iterators.
   *
   * <p>
   * Keys returned by the iterator are not guaranteed to be in sorted order.
   *
   * @return an iterator over Key,Value pairs which meet the restrictions set on the scanner
   */
  @Override
  Iterator<Entry<Key,Value>> iterator();
  /**
   * This setting determines how long a scanner will automatically retry when a failure occurs. By
   * default, a scanner will retry forever.
   *
   * <p>
   * Setting the timeout to zero (with any time unit) or {@link Long#MAX_VALUE} (with
   * {@link TimeUnit#MILLISECONDS}) means no timeout.
   *
   * @param timeOut the length of the timeout
   * @param timeUnit the units of the timeout
   * @since 1.5.0
   */
  void setTimeout(long timeOut, TimeUnit timeUnit);
  /**
   * Returns the setting for how long a scanner will automatically retry when a failure occurs.
   *
   * @return the timeout configured for this scanner
   * @since 1.5.0
   */
  long getTimeout(TimeUnit timeUnit);
  /**
   * Closes any underlying connections on the scanner. This may invalidate any iterators derived
   * from the Scanner, causing them to throw exceptions.
   *
   * @since 1.5.0
   */
  @Override
  void close();
  /**
   * Returns the authorizations that have been set on the scanner
   *
   * @since 1.7.0
   * @return The authorizations set on the scanner instance
   */
  Authorizations getAuthorizations();
  /**
   * Setting this will cause the scanner to read sample data, as long as that sample data was
   * generated with the given configuration. By default this is not set and all data is read.
   *
   * <p>
   * One way to use this method is as follows, where the sampler configuration is obtained from the
   * table configuration. Sample data can be generated in many different ways, so its important to
   * verify the sample data configuration meets expectations.
   *
   * <pre>
   * <code>
   *   // could cache this if creating many scanners to avoid RPCs.
   *   SamplerConfiguration samplerConfig =
   *     client.tableOperations().getSamplerConfiguration(table);
   *   // verify table's sample data is generated in an expected way before using
   *   userCode.verifySamplerConfig(samplerConfig);
   *   scanner.setSamplerConfiguration(samplerConfig);
   * </code>
   * </pre>
   *
   * <p>
   * Of course this is not the only way to obtain a {@link SamplerConfiguration}, it could be a
   * constant, configuration, etc.
   *
   * <p>
   * If sample data is not present or sample data was generated with a different configuration, then
   * the scanner iterator will throw a {@link SampleNotPresentException}. Also if a table's sampler
   * configuration is changed while a scanner is iterating over a table, a
   * {@link SampleNotPresentException} may be thrown.
   *
   * @since 1.8.0
   */
  void setSamplerConfiguration(SamplerConfiguration samplerConfig);
  /**
   * @return currently set sampler configuration. Returns null if no sampler configuration is set.
   * @since 1.8.0
   */
  SamplerConfiguration getSamplerConfiguration();
  /**
   * Clears sampler configuration making a scanner read all data. After calling this,
   * {@link #getSamplerConfiguration()} should return null.
   *
   * @since 1.8.0
   */
  void clearSamplerConfiguration();
  /**
   * This setting determines how long a scanner will wait to fill the returned batch. By default, a
   * scanner wait until the batch is full.
   *
   * <p>
   * Setting the timeout to zero (with any time unit) or {@link Long#MAX_VALUE} (with
   * {@link TimeUnit#MILLISECONDS}) means no timeout.
   *
   * @param timeOut the length of the timeout
   * @param timeUnit the units of the timeout
   * @since 1.8.0
   */
  void setBatchTimeout(long timeOut, TimeUnit timeUnit);
  /**
   * Returns the timeout to fill a batch in the given TimeUnit.
   *
   * @return the batch timeout configured for this scanner
   * @since 1.8.0
   */
  long getBatchTimeout(TimeUnit timeUnit);
  /**
   * Sets the name of the classloader context on this scanner. See the administration chapter of the
   * user manual for details on how to configure and use classloader contexts.
   *
   * @param classLoaderContext name of the classloader context
   * @throws NullPointerException if context is null
   * @since 1.8.0
   */
  void setClassLoaderContext(String classLoaderContext);
  /**
   * Clears the current classloader context set on this scanner
   *
   * @since 1.8.0
   */
  void clearClassLoaderContext();
  /**
   * Returns the name of the current classloader context set on this scanner
   *
   * @return name of the current context
   * @since 1.8.0
   */
  String getClassLoaderContext();
  /**
   * Set hints for the configured {@link ScanPrioritizer} and {@link ScanDispatcher}. These hints
   * are available on the server side via {@link ScanInfo#getExecutionHints()} Depending on the
   * configuration, these hints may be ignored. Hints will never impact what data is returned by a
   * scan, only how quickly it is returned.
   *
   * <p>
   * Using the hint {@code scan_type=<type>} and documenting all of the types for your application
   * is one strategy to consider. This allows administrators to adjust executor and prioritizer
   * config for your application scan types without having to change the application source code.
   *
   * <p>
   * The default configuration for Accumulo will ignore hints. See {@link HintScanPrioritizer} and
   * {@link SimpleScanDispatcher} for examples of classes that can react to hints.
   *
   * @since 2.0.0
   */
  default void setExecutionHints(Map<String,String> hints) {
    // hints are unsupported unless an implementation overrides this method
    throw new UnsupportedOperationException();
  }
  /**
   * Iterates through Scanner results.
   *
   * @param keyValueConsumer user-defined BiConsumer
   * @since 2.1.0
   */
  default void forEach(BiConsumer<? super Key,? super Value> keyValueConsumer) {
    for (Entry<Key,Value> entry : this) {
      keyValueConsumer.accept(entry.getKey(), entry.getValue());
    }
  }
  /**
   * Get the configured consistency level
   *
   * @return consistency level
   * @since 2.1.0
   */
  public ConsistencyLevel getConsistencyLevel();
  /**
   * Set the desired consistency level for this scanner.
   *
   * @param level consistency level
   * @since 2.1.0
   */
  public void setConsistencyLevel(ConsistencyLevel level);
  /**
   * Stream the Scanner results sequentially from this scanner's iterator
   *
   * @return a Stream of the returned key-value pairs
   * @since 2.1.0
   */
  default Stream<Entry<Key,Value>> stream() {
    // false -> a sequential (non-parallel) stream over this scanner's spliterator
    return StreamSupport.stream(this.spliterator(), false);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.accumulo.core.conf.ClientProperty.CONDITIONAL_WRITER_THREADS_MAX;
import static org.apache.accumulo.core.conf.ClientProperty.CONDITIONAL_WRITER_TIMEOUT_MAX;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.security.Authorizations;
/**
* @since 1.6.0
*/
public class ConditionalWriterConfig {
  private static final Long DEFAULT_TIMEOUT = getDefaultTimeout();
  private static final Integer DEFAULT_MAX_WRITE_THREADS =
      Integer.parseInt(CONDITIONAL_WRITER_THREADS_MAX.getDefaultValue());
  // null fields below mean "not explicitly configured"; getters fall back to the defaults
  private Long timeout = null;
  private Integer maxWriteThreads = null;
  private Authorizations auths = null;
  private Durability durability = null;
  private String classLoaderContext = null;
  /**
   * A set of authorization labels that will be checked against the column visibility of each key in
   * order to filter data. The authorizations passed in must be a subset of the accumulo user's set
   * of authorizations. If the accumulo user has authorizations (A1, A2) and authorizations (A2, A3)
   * are passed, then an exception will be thrown.
   *
   * <p>
   * Any condition that is not visible with this set of authorizations will fail.
   */
  public ConditionalWriterConfig setAuthorizations(Authorizations auths) {
    checkArgument(auths != null, "auths is null");
    this.auths = auths;
    return this;
  }
  /**
   * Sets the maximum amount of time an unresponsive server will be re-tried. When this timeout is
   * exceeded, the {@link ConditionalWriter} should return the mutation with an exception.<br>
   * For no timeout, set to zero, or {@link Long#MAX_VALUE} with {@link TimeUnit#MILLISECONDS}.
   *
   * <p>
   * {@link TimeUnit#MICROSECONDS} or {@link TimeUnit#NANOSECONDS} will be truncated to the nearest
   * {@link TimeUnit#MILLISECONDS}.<br>
   * If this truncation would result in making the value zero when it was specified as non-zero,
   * then a minimum value of one {@link TimeUnit#MILLISECONDS} will be used.
   *
   * <p>
   * <b>Default:</b> {@link Long#MAX_VALUE} (no timeout)
   *
   * @param timeout the timeout, in the unit specified by the value of {@code timeUnit}
   * @param timeUnit determines how {@code timeout} will be interpreted
   * @throws IllegalArgumentException if {@code timeout} is less than 0
   * @return {@code this} to allow chaining of set methods
   */
  public ConditionalWriterConfig setTimeout(long timeout, TimeUnit timeUnit) {
    if (timeout < 0) {
      throw new IllegalArgumentException("Negative timeout not allowed " + timeout);
    }
    if (timeout == 0) {
      this.timeout = Long.MAX_VALUE;
    } else {
      // make small, positive values that truncate to 0 when converted use the minimum millis
      // instead
      this.timeout = Math.max(1, timeUnit.toMillis(timeout));
    }
    return this;
  }
  /**
   * Sets the maximum number of threads to use for writing data to the tablet servers.
   *
   * <p>
   * <b>Default:</b> 3
   *
   * @param maxWriteThreads the maximum threads to use
   * @throws IllegalArgumentException if {@code maxWriteThreads} is non-positive
   * @return {@code this} to allow chaining of set methods
   */
  public ConditionalWriterConfig setMaxWriteThreads(int maxWriteThreads) {
    if (maxWriteThreads <= 0) {
      throw new IllegalArgumentException("Max threads must be positive " + maxWriteThreads);
    }
    this.maxWriteThreads = maxWriteThreads;
    return this;
  }
  /**
   * Sets the Durability for the mutation, if applied.
   * <p>
   * <b>Default:</b> Durability.DEFAULT: use the table's durability configuration.
   *
   * @return {@code this} to allow chaining of set methods
   * @since 1.7.0
   */
  public ConditionalWriterConfig setDurability(Durability durability) {
    this.durability = durability;
    return this;
  }
  /** @return the configured authorizations, or {@link Authorizations#EMPTY} if unset */
  public Authorizations getAuthorizations() {
    return auths != null ? auths : Authorizations.EMPTY;
  }
  /** @return the configured timeout (or the default) converted to the given unit */
  public long getTimeout(TimeUnit timeUnit) {
    return timeUnit.convert(timeout != null ? timeout : DEFAULT_TIMEOUT, MILLISECONDS);
  }
  /** @return the configured max write threads, or the client property default if unset */
  public int getMaxWriteThreads() {
    return maxWriteThreads != null ? maxWriteThreads : DEFAULT_MAX_WRITE_THREADS;
  }
  /** @return the configured durability, or {@link Durability#DEFAULT} if unset */
  public Durability getDurability() {
    return durability != null ? durability : Durability.DEFAULT;
  }
  // Resolve the default timeout from the client property; a configured value of zero means
  // "no timeout", represented internally as Long.MAX_VALUE.
  private static long getDefaultTimeout() {
    long defVal =
        ConfigurationTypeHelper.getTimeInMillis(CONDITIONAL_WRITER_TIMEOUT_MAX.getDefaultValue());
    return defVal != 0L ? defVal : Long.MAX_VALUE;
  }
  /**
   * Sets the name of the classloader context used by this conditional writer. See the
   * administration chapter of the user manual for details on how to configure and use classloader
   * contexts.
   *
   * @param classLoaderContext name of the classloader context
   * @throws NullPointerException if context is null
   * @since 1.8.0
   */
  public void setClassLoaderContext(String classLoaderContext) {
    requireNonNull(classLoaderContext, "context name cannot be null");
    this.classLoaderContext = classLoaderContext;
  }
  /**
   * Clears the current classloader context set on this configuration
   *
   * @since 1.8.0
   */
  public void clearClassLoaderContext() {
    this.classLoaderContext = null;
  }
  /**
   * Returns the name of the current classloader context set on this configuration
   *
   * @return name of the current context
   * @since 1.8.0
   */
  public String getClassLoaderContext() {
    return this.classLoaderContext;
  }
  // Generic "first non-null wins" helper used by merge(ConditionalWriterConfig).
  private static <T> T merge(T o1, T o2) {
    if (o1 != null) {
      return o1;
    }
    return o2;
  }
  /**
   * Merge this ConditionalWriterConfig with another. If config is set in both, preference will be
   * given to this config.
   *
   * @param other Another ConditionalWriterConfig
   * @return Merged ConditionalWriterConfig
   * @since 2.1.0
   */
  public ConditionalWriterConfig merge(ConditionalWriterConfig other) {
    ConditionalWriterConfig result = new ConditionalWriterConfig();
    result.timeout = merge(this.timeout, other.timeout);
    result.maxWriteThreads = merge(this.maxWriteThreads, other.maxWriteThreads);
    result.durability = merge(this.durability, other.durability);
    result.auths = merge(this.auths, other.auths);
    // bug fix: previously the classloader context was dropped by merge, even though it is a
    // configurable field of this class (set/clear/getClassLoaderContext)
    result.classLoaderContext = merge(this.classLoaderContext, other.classLoaderContext);
    return result;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.accumulo.core.conf.ClientProperty.BATCH_WRITER_LATENCY_MAX;
import static org.apache.accumulo.core.conf.ClientProperty.BATCH_WRITER_MEMORY_MAX;
import static org.apache.accumulo.core.conf.ClientProperty.BATCH_WRITER_THREADS_MAX;
import static org.apache.accumulo.core.conf.ClientProperty.BATCH_WRITER_TIMEOUT_MAX;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.clientImpl.DurabilityImpl;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;
/**
* This object holds configuration settings used to instantiate a {@link BatchWriter}
*
* @since 1.5.0
*/
public class BatchWriterConfig implements Writable {
  // Defaults are derived from the batch writer client properties; a null instance field means
  // "not explicitly configured" and the corresponding getter falls back to the default.
  private static final Long DEFAULT_MAX_MEMORY =
      ConfigurationTypeHelper.getMemoryAsBytes(BATCH_WRITER_MEMORY_MAX.getDefaultValue());
  private Long maxMemory = null;
  private static final Long DEFAULT_MAX_LATENCY =
      ConfigurationTypeHelper.getTimeInMillis(BATCH_WRITER_LATENCY_MAX.getDefaultValue());
  private Long maxLatency = null;
  private static final long DEFAULT_TIMEOUT = getDefaultTimeout();
  private Long timeout = null;
  private static final Integer DEFAULT_MAX_WRITE_THREADS =
      Integer.parseInt(BATCH_WRITER_THREADS_MAX.getDefaultValue());
  private Integer maxWriteThreads = null;
  private Durability durability = Durability.DEFAULT;
  // true once setDurability has been called explicitly
  private boolean isDurabilitySet = false;
private static long getDefaultTimeout() {
long defVal =
ConfigurationTypeHelper.getTimeInMillis(BATCH_WRITER_TIMEOUT_MAX.getDefaultValue());
if (defVal == 0L) {
return Long.MAX_VALUE;
} else {
return defVal;
}
}
  /**
   * Sets the maximum memory to batch before writing. The smaller this value, the more frequently
   * the {@link BatchWriter} will write.<br>
   * If set to a value smaller than a single mutation, then it will {@link BatchWriter#flush()}
   * after each added mutation. Must be non-negative.
   *
   * <p>
   * <b>Default:</b> 50M
   *
   * @param maxMemory max size in bytes
   * @throws IllegalArgumentException if {@code maxMemory} is less than 0
   * @return {@code this} to allow chaining of set methods
   */
  public BatchWriterConfig setMaxMemory(long maxMemory) {
    if (maxMemory < 0) {
      throw new IllegalArgumentException("Max memory must be non-negative.");
    }
    // zero is allowed: it forces a flush after every added mutation (see javadoc)
    this.maxMemory = maxMemory;
    return this;
  }
/**
* Sets the maximum amount of time to hold the data in memory before flushing it to servers.<br>
* For no maximum, set to zero, or {@link Long#MAX_VALUE} with {@link TimeUnit#MILLISECONDS}.
*
* <p>
* {@link TimeUnit#MICROSECONDS} or {@link TimeUnit#NANOSECONDS} will be truncated to the nearest
* {@link TimeUnit#MILLISECONDS}.<br>
* If this truncation would result in making the value zero when it was specified as non-zero,
* then a minimum value of one {@link TimeUnit#MILLISECONDS} will be used.
*
* <p>
* <b>Default:</b> 120 seconds
*
* @param maxLatency the maximum latency, in the unit specified by the value of {@code timeUnit}
* @param timeUnit determines how {@code maxLatency} will be interpreted
* @throws IllegalArgumentException if {@code maxLatency} is less than 0
* @return {@code this} to allow chaining of set methods
*/
public BatchWriterConfig setMaxLatency(long maxLatency, TimeUnit timeUnit) {
if (maxLatency < 0) {
throw new IllegalArgumentException("Negative max latency not allowed " + maxLatency);
}
if (maxLatency == 0) {
this.maxLatency = Long.MAX_VALUE;
} else {
// make small, positive values that truncate to 0 when converted use the minimum millis
// instead
this.maxLatency = Math.max(1, timeUnit.toMillis(maxLatency));
}
return this;
}
/**
* Sets the maximum amount of time an unresponsive server will be re-tried. When this timeout is
* exceeded, the {@link BatchWriter} should throw an exception.<br>
* For no timeout, set to zero, or {@link Long#MAX_VALUE} with {@link TimeUnit#MILLISECONDS}.
*
* <p>
* {@link TimeUnit#MICROSECONDS} or {@link TimeUnit#NANOSECONDS} will be truncated to the nearest
* {@link TimeUnit#MILLISECONDS}.<br>
* If this truncation would result in making the value zero when it was specified as non-zero,
* then a minimum value of one {@link TimeUnit#MILLISECONDS} will be used.
*
* <p>
* <b>Default:</b> {@link Long#MAX_VALUE} (no timeout)
*
* @param timeout the timeout, in the unit specified by the value of {@code timeUnit}
* @param timeUnit determines how {@code timeout} will be interpreted
* @throws IllegalArgumentException if {@code timeout} is less than 0
* @return {@code this} to allow chaining of set methods
*/
public BatchWriterConfig setTimeout(long timeout, TimeUnit timeUnit) {
if (timeout < 0) {
throw new IllegalArgumentException("Negative timeout not allowed " + timeout);
}
if (timeout == 0) {
this.timeout = Long.MAX_VALUE;
} else {
// make small, positive values that truncate to 0 when converted use the minimum millis
// instead
this.timeout = Math.max(1, timeUnit.toMillis(timeout));
}
return this;
}
  /**
   * Sets the maximum number of threads to use for writing data to the tablet servers.
   *
   * <p>
   * <b>Default:</b> 3
   *
   * @param maxWriteThreads the maximum threads to use
   * @throws IllegalArgumentException if {@code maxWriteThreads} is non-positive
   * @return {@code this} to allow chaining of set methods
   */
  public BatchWriterConfig setMaxWriteThreads(int maxWriteThreads) {
    if (maxWriteThreads <= 0) {
      throw new IllegalArgumentException("Max threads must be positive " + maxWriteThreads);
    }
    this.maxWriteThreads = maxWriteThreads;
    return this;
  }
  /** @return the configured max memory in bytes, or the client property default if unset */
  public long getMaxMemory() {
    return maxMemory != null ? maxMemory : DEFAULT_MAX_MEMORY;
  }
  /** @return the configured max latency (or the default) converted to the given unit */
  public long getMaxLatency(TimeUnit timeUnit) {
    return timeUnit.convert(maxLatency != null ? maxLatency : DEFAULT_MAX_LATENCY, MILLISECONDS);
  }
  /** @return the configured timeout (or the default) converted to the given unit */
  public long getTimeout(TimeUnit timeUnit) {
    return timeUnit.convert(timeout != null ? timeout : DEFAULT_TIMEOUT, MILLISECONDS);
  }
  /** @return the configured max write threads, or the client property default if unset */
  public int getMaxWriteThreads() {
    return maxWriteThreads != null ? maxWriteThreads : DEFAULT_MAX_WRITE_THREADS;
  }
  /**
   * @since 1.7.0
   * @return the durability to be used by the BatchWriter
   */
  public Durability getDurability() {
    return durability;
  }
  /**
   * Change the durability for the BatchWriter session. The default durability is "default" which is
   * the table's durability setting. If the durability is set to something other than the default,
   * it will override the durability setting of the table.
   *
   * @param durability the Durability to be used by the BatchWriter
   * @since 1.7.0
   *
   */
  public BatchWriterConfig setDurability(Durability durability) {
    this.durability = durability;
    // record that durability was explicitly chosen by the caller
    isDurabilitySet = true;
    return this;
  }
  /**
   * Serializes this configuration in a human-readable form: a 7-byte header containing the payload
   * length in base 36 (space-padded to six characters, terminated by {@code '#'}), followed by
   * comma-separated {@code key=value} fields for each explicitly-set option.
   */
  @Override
  public void write(DataOutput out) throws IOException {
    // write this out in a human-readable way
    ArrayList<String> fields = new ArrayList<>();
    if (maxMemory != null) {
      addField(fields, "maxMemory", maxMemory);
    }
    if (maxLatency != null) {
      addField(fields, "maxLatency", maxLatency);
    }
    if (maxWriteThreads != null) {
      addField(fields, "maxWriteThreads", maxWriteThreads);
    }
    if (timeout != null) {
      addField(fields, "timeout", timeout);
    }
    // unlike the nullable fields above, durability is only serialized when it is not the default
    if (durability != Durability.DEFAULT) {
      addField(fields, "durability", durability);
    }
    String output = StringUtils.join(",", fields);
    byte[] bytes = output.getBytes(UTF_8);
    // length header: base-36 payload length, space-padded to 6 chars plus the '#' terminator
    byte[] len = String.format("%6s#", Integer.toString(bytes.length, 36)).getBytes(UTF_8);
    if (len.length != 7) {
      throw new IllegalStateException("encoded length does not match expected value");
    }
    out.write(len);
    out.write(bytes);
  }
private void addField(List<String> fields, String name, Object value) {
String key = StringUtils.escapeString(name, '\\', new char[] {',', '='});
String val = StringUtils.escapeString(String.valueOf(value), '\\', new char[] {',', '='});
fields.add(key + '=' + val);
}
@Override
public void readFields(DataInput in) throws IOException {
byte[] len = new byte[7];
in.readFully(len);
String strLen = new String(len, UTF_8);
if (!strLen.endsWith("#")) {
throw new IllegalStateException("length was not encoded correctly");
}
byte[] bytes = new byte[Integer
.parseInt(strLen.substring(strLen.lastIndexOf(' ') + 1, strLen.length() - 1), 36)];
in.readFully(bytes);
String strFields = new String(bytes, UTF_8);
String[] fields = StringUtils.split(strFields, '\\', ',');
for (String field : fields) {
String[] keyValue = StringUtils.split(field, '\\', '=');
String key = keyValue[0];
String value = keyValue[1];
if ("maxMemory".equals(key)) {
maxMemory = Long.valueOf(value);
} else if ("maxLatency".equals(key)) {
maxLatency = Long.valueOf(value);
} else if ("maxWriteThreads".equals(key)) {
maxWriteThreads = Integer.valueOf(value);
} else if ("timeout".equals(key)) {
timeout = Long.valueOf(value);
} else if ("durability".equals(key)) {
durability = DurabilityImpl.fromString(value);
} else {
/* ignore any other properties */
}
}
}
@Override
public boolean equals(Object o) {
if (o instanceof BatchWriterConfig) {
BatchWriterConfig other = (BatchWriterConfig) o;
if (maxMemory != null) {
if (!maxMemory.equals(other.maxMemory)) {
return false;
}
} else {
if (other.maxMemory != null) {
return false;
}
}
if (maxLatency != null) {
if (!maxLatency.equals(other.maxLatency)) {
return false;
}
} else {
if (other.maxLatency != null) {
return false;
}
}
if (maxWriteThreads != null) {
if (!maxWriteThreads.equals(other.maxWriteThreads)) {
return false;
}
} else {
if (other.maxWriteThreads != null) {
return false;
}
}
if (timeout != null) {
if (!timeout.equals(other.timeout)) {
return false;
}
} else {
if (other.timeout != null) {
return false;
}
}
return durability == other.durability;
}
return false;
}
private static <T> T merge(T o1, T o2) {
if (o1 != null) {
return o1;
}
return o2;
}
/**
* Merge this BatchWriterConfig with another. If config is set in both, preference will be given
* to this config.
*
* @param other Another BatchWriterConfig
* @return Merged BatchWriterConfig
* @since 2.0.0
*/
public BatchWriterConfig merge(BatchWriterConfig other) {
BatchWriterConfig result = new BatchWriterConfig();
result.maxMemory = merge(this.maxMemory, other.maxMemory);
result.maxLatency = merge(this.maxLatency, other.maxLatency);
result.timeout = merge(this.timeout, other.timeout);
result.maxWriteThreads = merge(this.maxWriteThreads, other.maxWriteThreads);
if (this.isDurabilitySet) {
result.durability = this.durability;
} else if (other.isDurabilitySet) {
result.durability = other.durability;
}
return result;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder();
hcb.append(maxMemory).append(maxLatency).append(maxWriteThreads).append(timeout)
.append(durability);
return hcb.toHashCode();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(32);
sb.append("[maxMemory=").append(getMaxMemory()).append(", maxLatency=")
.append(getMaxLatency(MILLISECONDS)).append(", maxWriteThreads=")
.append(getMaxWriteThreads()).append(", timeout=").append(getTimeout(MILLISECONDS))
.append(", durability=").append(durability).append("]");
return sb.toString();
}
}
| 9,982 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/NamespaceNotFoundException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
/**
* Thrown when the namespace specified doesn't exist when it was expected to
*/
public class NamespaceNotFoundException extends Exception {
  /**
   * Exception to throw if an operation is attempted on a namespace that doesn't exist.
   */
  private static final long serialVersionUID = 1L;
  // final: every constructor path funnels through the three-arg constructor exactly once
  private final String namespace;
  /**
   * Builds a descriptive message from whichever identifying pieces are available (each part is
   * omitted when null or empty).
   *
   * @param namespaceId the internal id of the namespace that was sought
   * @param namespaceName the visible name of the namespace that was sought
   * @param description the specific reason why it failed
   */
  public NamespaceNotFoundException(String namespaceId, String namespaceName, String description) {
    super(
        "Namespace" + (namespaceName != null && !namespaceName.isEmpty() ? " " + namespaceName : "")
            + (namespaceId != null && !namespaceId.isEmpty() ? " (Id=" + namespaceId + ")" : "")
            + " does not exist"
            + (description != null && !description.isEmpty() ? " (" + description + ")" : ""));
    this.namespace = namespaceName;
  }
  /**
   * @param namespaceId the internal id of the namespace that was sought
   * @param namespaceName the visible name of the namespace that was sought
   * @param description the specific reason why it failed
   * @param cause the exception that caused this failure
   */
  public NamespaceNotFoundException(String namespaceId, String namespaceName, String description,
      Throwable cause) {
    this(namespaceId, namespaceName, description);
    super.initCause(cause);
  }
  /**
   * @param e constructs an exception from a thrift exception
   */
  public NamespaceNotFoundException(ThriftTableOperationException e) {
    this(e.getTableId(), e.getTableName(), e.getDescription(), e);
  }
  /**
   * @return the name of the namespace sought, or null when it was not known
   */
  public String getNamespaceName() {
    return namespace;
  }
}
| 9,983 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/SampleNotPresentException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
/**
 * Exception thrown when a table does not have sampling configured or when sampling is configured
 * but it differs from what was requested.
 *
 * @since 1.8.0
 */
public class SampleNotPresentException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  /** Creates an exception with no detail message. */
  public SampleNotPresentException() {}

  /**
   * @param message the detail message
   */
  public SampleNotPresentException(String message) {
    super(message);
  }

  /**
   * @param message the detail message
   * @param cause the underlying cause of the failure
   */
  public SampleNotPresentException(String message, Exception cause) {
    super(message, cause);
  }
}
| 9,984 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/AccumuloClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.lang.Thread.UncaughtExceptionHandler;
import java.net.URL;
import java.nio.file.Path;
import java.util.Properties;
import org.apache.accumulo.core.client.admin.InstanceOperations;
import org.apache.accumulo.core.client.admin.NamespaceOperations;
import org.apache.accumulo.core.client.admin.SecurityOperations;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.security.Authorizations;
/**
* Client connection to an Accumulo instance. Allows the user to request a scanner, deleter or
* writer for the instance as well as various objects that permit administrative operations.
* Enforces security on the client side by requiring user credentials.
*
* <p>
* Supports fluent API for creation. Various options can be provided to {@link Accumulo#newClient()}
* and when finished a call to build() will return the AccumuloClient object. For example:
*
* <pre>
* <code>
* try (AccumuloClient client = Accumulo.newClient()
* .to(instanceName, zookeepers)
* .as(user, password).build())
* {
* // use the client
* }
* </code>
* </pre>
*
* <p>
* An important difference with the legacy Connector to consider is that Connector reused global
* static resources. AccumuloClient, however, attempts to clean up its resources on close. So,
* creating many AccumuloClient objects will perform worse than creating many Connectors did.
* Therefore, it is suggested to reuse AccumuloClient instances where possible, rather than create
* many of them.
*
* <p>
* AccumuloClient objects are intended to be thread-safe, and can be used by multiple threads.
* However, care should be taken to ensure that the client is eventually closed, to clean up any
* resources in use in the client application when all threads are finished with the AccumuloClient
* object. Additionally, while the client itself is thread-safe, it is not necessarily true that all
* objects produced from the client (such as Scanners) are thread-safe.
*
* @since 2.0.0
* @see <a href="https://accumulo.apache.org/docs/2.x/getting-started/clients">Accumulo Client
* Documentation</a>
*/
public interface AccumuloClient extends AutoCloseable {
  /**
   * Factory method to create a BatchScanner connected to Accumulo.
   *
   * @param tableName the name of the table to query
   * @param authorizations A set of authorization labels that will be checked against the column
   *        visibility of each key in order to filter data. The authorizations passed in must be a
   *        subset of the accumulo user's set of authorizations. If the accumulo user has
   *        authorizations (A1, A2) and authorizations (A2, A3) are passed, then an exception will
   *        be thrown.
   * @param numQueryThreads the number of concurrent threads to spawn for querying
   *
   * @return BatchScanner object for configuring and querying
   * @throws TableNotFoundException when the specified table doesn't exist
   */
  BatchScanner createBatchScanner(String tableName, Authorizations authorizations,
      int numQueryThreads) throws TableNotFoundException;
  /**
   * Factory method to create a BatchScanner connected to Accumulo. This method uses the number of
   * query threads configured when AccumuloClient was created. If none were configured, defaults
   * will be used.
   *
   * @param tableName the name of the table to query
   * @param authorizations A set of authorization labels that will be checked against the column
   *        visibility of each key in order to filter data. The authorizations passed in must be a
   *        subset of the accumulo user's set of authorizations. If the accumulo user has
   *        authorizations (A1, A2) and authorizations (A2, A3) are passed, then an exception will
   *        be thrown.
   *
   * @return BatchScanner object for configuring and querying
   * @throws TableNotFoundException when the specified table doesn't exist
   */
  BatchScanner createBatchScanner(String tableName, Authorizations authorizations)
      throws TableNotFoundException;
  /**
   * Factory method to create a BatchScanner with all of user's authorizations and the number of
   * query threads configured when AccumuloClient was created. If no query threads were configured,
   * defaults will be used.
   *
   * @param tableName the name of the table to query
   *
   * @return BatchScanner object for configuring and querying
   * @throws TableNotFoundException when the specified table doesn't exist
   */
  BatchScanner createBatchScanner(String tableName)
      throws TableNotFoundException, AccumuloSecurityException, AccumuloException;
  /**
   * Factory method to create BatchDeleter
   *
   * @param tableName the name of the table to query and delete from
   * @param authorizations A set of authorization labels that will be checked against the column
   *        visibility of each key in order to filter data. The authorizations passed in must be a
   *        subset of the accumulo user's set of authorizations. If the accumulo user has
   *        authorizations (A1, A2) and authorizations (A2, A3) are passed, then an exception will
   *        be thrown.
   * @param numQueryThreads the number of concurrent threads to spawn for querying
   * @param config configuration used to create batch writer. This config takes precedence. Any
   *        unset values will be merged with config set when the AccumuloClient was created. If no
   *        config was set during AccumuloClient creation, BatchWriterConfig defaults will be used.
   * @return BatchDeleter object for configuring and deleting
   */
  BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
      int numQueryThreads, BatchWriterConfig config) throws TableNotFoundException;
  /**
   * Factory method to create BatchDeleter. This method uses BatchWriterConfig set when
   * AccumuloClient was created. If none was set, BatchWriterConfig defaults will be used.
   *
   * @param tableName the name of the table to query and delete from
   * @param authorizations A set of authorization labels that will be checked against the column
   *        visibility of each key in order to filter data. The authorizations passed in must be a
   *        subset of the accumulo user's set of authorizations. If the accumulo user has
   *        authorizations (A1, A2) and authorizations (A2, A3) are passed, then an exception will
   *        be thrown.
   * @param numQueryThreads the number of concurrent threads to spawn for querying
   * @return BatchDeleter object
   * @throws TableNotFoundException if table not found
   */
  BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
      int numQueryThreads) throws TableNotFoundException;
  /**
   * Factory method to create a BatchWriter connected to Accumulo.
   *
   * @param tableName the name of the table to insert data into
   * @param config configuration used to create batch writer. This config will take precedence. Any
   *        unset values will be merged with the config set when the AccumuloClient was created. If
   *        no config was set during AccumuloClient creation, BatchWriterConfig defaults will be
   *        used.
   * @return BatchWriter object for configuring and writing data to
   */
  BatchWriter createBatchWriter(String tableName, BatchWriterConfig config)
      throws TableNotFoundException;
  /**
   * Factory method to create a BatchWriter. This method uses BatchWriterConfig set when
   * AccumuloClient was created. If none was set, BatchWriterConfig defaults will be used.
   *
   * @param tableName the name of the table to insert data into
   * @return BatchWriter object
   * @throws TableNotFoundException if table not found
   */
  BatchWriter createBatchWriter(String tableName) throws TableNotFoundException;
  /**
   * Factory method to create a Multi-Table BatchWriter connected to Accumulo. Multi-table batch
   * writers can queue data for multiple tables. Also data for multiple tables can be sent to a
   * server in a single batch. It's an efficient way to ingest data into multiple tables from a
   * single process.
   *
   * @param config configuration used to create multi-table batch writer. This config will take
   *        precedence. Any unset values will be merged with the config set when the AccumuloClient
   *        was created. If no config was set during AccumuloClient creation, BatchWriterConfig
   *        defaults will be used.
   * @return MultiTableBatchWriter object for configuring and writing data to
   */
  MultiTableBatchWriter createMultiTableBatchWriter(BatchWriterConfig config);
  /**
   * Factory method to create a Multi-Table BatchWriter. This method uses BatchWriterConfig set when
   * AccumuloClient was created. If none was set, BatchWriterConfig defaults will be used.
   *
   * @return MultiTableBatchWriter object
   */
  MultiTableBatchWriter createMultiTableBatchWriter();
  /**
   * Factory method to create a Scanner connected to Accumulo.
   *
   * @param tableName the name of the table to query data from
   * @param authorizations A set of authorization labels that will be checked against the column
   *        visibility of each key in order to filter data. The authorizations passed in must be a
   *        subset of the accumulo user's set of authorizations. If the accumulo user has
   *        authorizations (A1, A2) and authorizations (A2, A3) are passed, then an exception will
   *        be thrown.
   *
   * @return Scanner object for configuring and querying data with
   * @throws TableNotFoundException when the specified table doesn't exist
   *
   * @see IsolatedScanner
   */
  Scanner createScanner(String tableName, Authorizations authorizations)
      throws TableNotFoundException;
  /**
   * Factory method to create a Scanner with all of the user's authorizations.
   *
   * @param tableName the name of the table to query data from
   *
   * @return Scanner object for configuring and querying data with
   * @throws TableNotFoundException when the specified table doesn't exist
   *
   * @see IsolatedScanner
   */
  Scanner createScanner(String tableName)
      throws TableNotFoundException, AccumuloSecurityException, AccumuloException;
  /**
   * Factory method to create a ConditionalWriter connected to Accumulo.
   *
   * @param tableName the name of the table to query data from
   * @param config configuration used to create conditional writer
   *
   * @return ConditionalWriter object for writing ConditionalMutations
   * @throws TableNotFoundException when the specified table doesn't exist
   */
  ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config)
      throws TableNotFoundException;
  /**
   * Factory method to create a ConditionalWriter connected to Accumulo.
   *
   * @param tableName the name of the table to query data from
   *
   * @return ConditionalWriter object for writing ConditionalMutations
   * @throws TableNotFoundException when the specified table doesn't exist
   *
   * @since 2.1.0
   */
  ConditionalWriter createConditionalWriter(String tableName) throws TableNotFoundException;
  /**
   * Get the current user for this AccumuloClient
   *
   * @return the user name
   */
  String whoami();
  /**
   * Retrieves a TableOperations object to perform table functions, such as create and delete.
   *
   * @return an object to manipulate tables
   */
  TableOperations tableOperations();
  /**
   * Retrieves a NamespaceOperations object to perform namespace functions, such as create and
   * delete.
   *
   * @return an object to manipulate namespaces
   */
  NamespaceOperations namespaceOperations();
  /**
   * Retrieves a SecurityOperations object to perform user security operations, such as creating
   * users.
   *
   * @return an object to modify users and permissions
   */
  SecurityOperations securityOperations();
  /**
   * Retrieves an InstanceOperations object to modify instance configuration.
   *
   * @return an object to modify instance configuration
   */
  InstanceOperations instanceOperations();
  /**
   * @return All {@link Properties} used to create client except 'auth.token'
   */
  Properties properties();
  /**
   * Cleans up any resources created by an AccumuloClient like threads and sockets. Anything created
   * from this client will likely not work after calling this method. For example a Scanner created
   * using this client will likely fail after close is called.
   */
  @Override
  void close();
  /**
   * Builds AccumuloClient or client Properties after all options have been specified
   *
   * @since 2.0.0
   */
  interface ClientFactory<T> {
    /**
     * Override default handling of uncaught exceptions in client threads
     *
     * @param ueh UncaughtExceptionHandler implementation
     * @return AccumuloClient or Properties
     * @since 2.1.0
     */
    ClientFactory<T> withUncaughtExceptionHandler(UncaughtExceptionHandler ueh);
    /**
     * Builds AccumuloClient or client Properties
     *
     * @return AccumuloClient or Properties
     */
    T build();
  }
  /**
   * Builder method for setting Accumulo instance and zookeepers
   *
   * @since 2.0.0
   */
  interface InstanceArgs<T> {
    /**
     * Specifies the Accumulo instance and ZooKeeper connection information to connect with.
     *
     * @param instanceName the name of the Accumulo instance
     * @param zookeepers the ZooKeeper connection string
     * @return this builder
     */
    AuthenticationArgs<T> to(CharSequence instanceName, CharSequence zookeepers);
  }
  /**
   * Builder methods for creating AccumuloClient using properties
   *
   * @since 2.0.0
   */
  interface PropertyOptions<T> extends InstanceArgs<T> {
    /**
     * Build using properties file. An example properties file can be found at
     * conf/accumulo-client.properties in the Accumulo tarball distribution.
     *
     * @param propertiesFilePath Path to properties file
     * @return this builder
     * @see <a href="https://accumulo.apache.org/docs/2.x/configuration/client-properties">Client
     *      properties documentation</a>
     */
    FromOptions<T> from(String propertiesFilePath);
    /**
     * Build using properties file. An example properties file can be found at
     * conf/accumulo-client.properties in the Accumulo tarball distribution.
     *
     * @param propertiesFile Path to properties file
     * @return this builder
     * @see <a href="https://accumulo.apache.org/docs/2.x/configuration/client-properties">Client
     *      properties documentation</a>
     */
    FromOptions<T> from(Path propertiesFile);
    /**
     * Build using properties file referenced by URL. An example properties file can be found at
     * conf/accumulo-client.properties in the Accumulo tarball distribution.
     *
     * @param propertiesURL URL path to properties file
     * @return this builder
     * @see <a href="https://accumulo.apache.org/docs/2.x/configuration/client-properties">Client
     *      properties documentation</a>
     */
    FromOptions<T> from(URL propertiesURL);
    /**
     * Build using Java properties object. An example properties file can be found at
     * conf/accumulo-client.properties in the Accumulo tarball distribution.
     *
     * @param properties Properties object
     * @return this builder
     * @see <a href="https://accumulo.apache.org/docs/2.x/configuration/client-properties">Client
     *      properties documentation</a>
     */
    FromOptions<T> from(Properties properties);
  }
  /**
   * Builder methods for authentication
   *
   * @since 2.0.0
   */
  interface AuthenticationArgs<T> {
    /**
     * Build using password-based credentials
     *
     * @param username User name
     * @param password Password
     * @return this builder
     */
    ConnectionOptions<T> as(CharSequence username, CharSequence password);
    /**
     * Build using Kerberos credentials
     *
     * @param principal Principal
     * @param keyTabFile Path to keytab file
     * @return this builder
     */
    ConnectionOptions<T> as(CharSequence principal, Path keyTabFile);
    /**
     * Build using specified credentials
     *
     * @param principal Principal/username
     * @param token Authentication token
     * @return this builder
     */
    ConnectionOptions<T> as(CharSequence principal, AuthenticationToken token);
  }
  /**
   * Build methods for SSL/TLS
   *
   * @since 2.0.0
   */
  interface SslOptions<T> extends ClientFactory<T> {
    /**
     * Build with SSL trust store
     *
     * @param path Path to trust store
     * @return this builder
     */
    SslOptions<T> truststore(CharSequence path);
    /**
     * Build with SSL trust store
     *
     * @param path Path to trust store
     * @param password Password used to encrypt trust store
     * @param type Trust store type
     * @return this builder
     */
    SslOptions<T> truststore(CharSequence path, CharSequence password, CharSequence type);
    /**
     * Build with SSL key store
     *
     * @param path Path to SSL key store
     * @return this builder
     */
    SslOptions<T> keystore(CharSequence path);
    /**
     * Build with SSL key store
     *
     * @param path Path to keystore
     * @param password Password used to encrypt key store
     * @param type Key store type
     * @return this builder
     */
    SslOptions<T> keystore(CharSequence path, CharSequence password, CharSequence type);
    /**
     * Use JSSE system properties to configure SSL
     *
     * @return this builder
     */
    SslOptions<T> useJsse();
  }
  /**
   * Build methods for SASL
   *
   * @since 2.0.0
   */
  interface SaslOptions<T> extends ClientFactory<T> {
    /**
     * Build with Kerberos Server Primary
     *
     * @param kerberosServerPrimary Kerberos server primary
     * @return this builder
     */
    SaslOptions<T> primary(CharSequence kerberosServerPrimary);
    /**
     * Build with SASL quality of protection
     *
     * @param qualityOfProtection Quality of protection
     * @return this builder
     */
    SaslOptions<T> qop(CharSequence qualityOfProtection);
  }
  /**
   * Build methods for connection options
   *
   * @since 2.0.0
   */
  interface ConnectionOptions<T> extends ClientFactory<T> {
    /**
     * Build using Zookeeper timeout
     *
     * @param timeout Zookeeper timeout (in milliseconds)
     * @return this builder
     */
    ConnectionOptions<T> zkTimeout(int timeout);
    /**
     * Build with SSL/TLS options
     *
     * @return this builder
     */
    SslOptions<T> useSsl();
    /**
     * Build with SASL options
     *
     * @return this builder
     */
    SaslOptions<T> useSasl();
    /**
     * Build with BatchWriterConfig defaults for BatchWriter, MultiTableBatchWriter and
     * BatchDeleter
     *
     * @param batchWriterConfig BatchWriterConfig
     * @return this builder
     */
    ConnectionOptions<T> batchWriterConfig(BatchWriterConfig batchWriterConfig);
    /**
     * Build with default number of query threads for BatchScanner
     *
     * @param numQueryThreads default number of query threads to use for BatchScanners
     * @return this builder
     */
    ConnectionOptions<T> batchScannerQueryThreads(int numQueryThreads);
    /**
     * Build with default batch size for Scanner
     *
     * @param batchSize default batch size to use for Scanners
     * @return this builder
     */
    ConnectionOptions<T> scannerBatchSize(int batchSize);
  }
  /**
   * Builder options available after properties have been supplied; combines
   * {@link ConnectionOptions} and {@link AuthenticationArgs}.
   *
   * @since 2.0.0
   */
  interface FromOptions<T> extends ConnectionOptions<T>, AuthenticationArgs<T> {
  }
}
| 9,985 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/BatchScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.data.Range;
/**
 * In exchange for possibly <b>returning scanned entries out of order</b>, BatchScanner
 * implementations may scan an Accumulo table more efficiently by
 * <ul>
 * <li>Looking up multiple ranges in parallel. Parallelism is constrained by the number of threads
 * available to the BatchScanner, set in its constructor.</li>
 * <li>Breaking up large ranges into subranges. Often the number and boundaries of subranges are
 * determined by a table's split points.</li>
 * <li>Combining multiple ranges into a single RPC call to a tablet server.</li>
 * </ul>
 *
 * The above techniques lead to better performance than a {@link Scanner} in use cases such as
 * <ul>
 * <li>Retrieving many small ranges</li>
 * <li>Scanning a large range that returns many entries</li>
 * <li>Running server-side iterators that perform computation, even if few entries are returned from
 * the scan itself</li>
 * </ul>
 *
 * To re-emphasize, only use a BatchScanner when you do not care whether returned data is in sorted
 * order. Use a {@link Scanner} instead when sorted order is important.
 *
 * <p>
 * A BatchScanner instance will use no more threads than provided in the construction of the
 * BatchScanner implementation. Multiple invocations of <code>iterator()</code> will all share the
 * same resources of the instance. A new BatchScanner instance should be created to allocate
 * additional threads.
 */
public interface BatchScanner extends ScannerBase {
  /**
   * Allows scanning over multiple ranges efficiently.
   *
   * @param ranges specifies the non-overlapping ranges to query
   */
  void setRanges(Collection<Range> ranges);
  /** Releases the resources held by this scanner. */
  @Override
  void close();
  /**
   * {@inheritDoc}
   *
   * <p>
   * The batch scanner will accomplish as much work as possible before throwing an exception.
   * BatchScanner iterators will throw a {@link TimedOutException} when all needed servers timeout.
   */
  @Override
  void setTimeout(long timeout, TimeUnit timeUnit);
}
| 9,986 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/MultiTableBatchWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
/**
* This class enables efficient batch writing to multiple tables. When creating a batch writer for
* each table, each has its own memory and network resources. Using this class these resources may
* be shared among multiple tables.
*/
public interface MultiTableBatchWriter extends AutoCloseable {

  /**
   * Returns a BatchWriter for a particular table. Writers returned by this method share the
   * memory and network resources of this instance (see the class javadoc).
   *
   * @param table the name of a table whose batch writer you wish to retrieve
   * @return an instance of a batch writer for the specified table
   * @throws AccumuloException when a general exception occurs with accumulo
   * @throws AccumuloSecurityException when the user is not allowed to insert data into that table
   * @throws TableNotFoundException when the table does not exist
   */
  BatchWriter getBatchWriter(String table)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException;

  /**
   * Send mutations for all tables to accumulo.
   *
   * @throws MutationsRejectedException when queued mutations are unable to be inserted
   */
  void flush() throws MutationsRejectedException;

  /**
   * Flush and release all resources.
   *
   * @throws MutationsRejectedException when queued mutations are unable to be inserted
   */
  @Override
  void close() throws MutationsRejectedException;

  /**
   * Returns true if this batch writer has been closed.
   *
   * @return true if this batch writer has been closed
   */
  boolean isClosed();
}
| 9,987 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/TimedOutException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
/**
* @since 1.5.0
*/
public class TimedOutException extends RuntimeException {

  private final HashSet<String> timedoutServers = new HashSet<>();

  private static final long serialVersionUID = 1L;

  /**
   * Renders at most 10 server names in the message; larger sets are truncated with a count of the
   * servers not shown, to keep exception messages bounded in size.
   */
  private static String shorten(Set<String> set) {
    if (set.size() < 10) {
      return set.toString();
    }
    return new ArrayList<>(set).subList(0, 10) + " ... " + (set.size() - 10) + " servers not shown";
  }

  /**
   * @param timedoutServers the servers that failed to respond in time; retained and available via
   *        {@link #getTimedOutServers()}
   */
  public TimedOutException(Set<String> timedoutServers) {
    super("Servers timed out " + shorten(timedoutServers));
    this.timedoutServers.addAll(timedoutServers);
  }

  /**
   * Creates an exception with a plain message; {@link #getTimedOutServers()} will be empty.
   */
  public TimedOutException(String msg) {
    super(msg);
  }

  /**
   * @return an unmodifiable view of the servers that timed out; empty when this exception was
   *         constructed from a plain message
   * @deprecated the method name is misspelled; use {@link #getTimedOutServers()} instead
   */
  @Deprecated
  public Set<String> getTimedOutSevers() {
    return getTimedOutServers();
  }

  /**
   * @return an unmodifiable view of the servers that timed out; empty when this exception was
   *         constructed from a plain message
   */
  public Set<String> getTimedOutServers() {
    return Collections.unmodifiableSet(timedoutServers);
  }
}
| 9,988 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/TableNotFoundException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.util.tables.TableNameUtil;
/**
* Thrown when the table specified doesn't exist when it was expected to
*/
public class TableNotFoundException extends Exception {

  private static final long serialVersionUID = 1L;

  /** The visible name of the table that was sought; may be null. */
  private String tableName;

  /**
   * Builds the exception message from whichever identifiers are available, omitting any that are
   * null or empty.
   */
  private static String buildMessage(String tableId, String tableName, String description) {
    StringBuilder msg = new StringBuilder("Table");
    if (tableName != null && !tableName.isEmpty()) {
      msg.append(" ").append(tableName);
    }
    if (tableId != null && !tableId.isEmpty()) {
      msg.append(" (Id=").append(tableId).append(")");
    }
    msg.append(" does not exist");
    if (description != null && !description.isEmpty()) {
      msg.append(" (").append(description).append(")");
    }
    return msg.toString();
  }

  /**
   * @param tableId the internal id of the table that was sought
   * @param tableName the visible name of the table that was sought
   * @param description the specific reason why it failed
   */
  public TableNotFoundException(String tableId, String tableName, String description) {
    super(buildMessage(tableId, tableName, description));
    this.tableName = tableName;
  }

  /**
   * @param tableId the internal id of the table that was sought
   * @param tableName the visible name of the table that was sought
   * @param description the specific reason why it failed
   * @param cause the exception that caused this failure
   */
  public TableNotFoundException(String tableId, String tableName, String description,
      Throwable cause) {
    this(tableId, tableName, description);
    initCause(cause);
  }

  /**
   * @param e constructs an exception from a thrift exception
   */
  public TableNotFoundException(ThriftTableOperationException e) {
    this(e.getTableId(), e.getTableName(), e.getDescription(), e);
  }

  /**
   * @param tableName the original specified table
   * @param e indicates that a table wasn't found because the namespace specified in the table name
   *        wasn't found
   */
  public TableNotFoundException(String tableName, NamespaceNotFoundException e) {
    this(null, tableName,
        "Namespace " + TableNameUtil.qualify(tableName).getFirst() + " does not exist.", e);
  }

  /**
   * @return the name of the table sought
   */
  public String getTableName() {
    return tableName;
  }
}
| 9,989 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/TableOfflineException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.data.TableId;
public class TableOfflineException extends RuntimeException {

  private static final long serialVersionUID = 1L;

  /**
   * @since 2.0.0
   */
  public TableOfflineException(String msg) {
    super(msg);
  }

  /**
   * Creates an exception naming the offline table; placeholders are substituted when either
   * identifier is unknown.
   *
   * @since 2.1.0
   */
  public TableOfflineException(TableId tableId, String tableName) {
    super("Table " + (tableName == null ? "<unknown table>" : tableName) + " ("
        + (tableId == null ? "<unknown id>" : tableId) + ") is offline");
  }

  /**
   * @since 2.0.0
   */
  public TableOfflineException(Exception cause) {
    super(cause);
  }
}
| 9,990 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/NamespaceNotEmptyException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
/**
* Thrown when the namespace specified contains tables
*/
public class NamespaceNotEmptyException extends Exception {

  private static final long serialVersionUID = 1L;

  /** The visible name of the namespace; may be null. */
  private String namespace;

  /**
   * @param namespaceId the internal id of the namespace
   * @param namespaceName the visible name of the namespace
   * @param description the specific reason why it failed
   */
  public NamespaceNotEmptyException(String namespaceId, String namespaceName, String description) {
    // Message fix: previously read "it not empty"; corrected to "is not empty".
    super(
        "Namespace" + (namespaceName != null && !namespaceName.isEmpty() ? " " + namespaceName : "")
            + (namespaceId != null && !namespaceId.isEmpty() ? " (Id=" + namespaceId + ")" : "")
            + " is not empty, contains at least one table"
            + (description != null && !description.isEmpty() ? " (" + description + ")" : ""));
    this.namespace = namespaceName;
  }

  /**
   * @param namespaceId the internal id of the namespace
   * @param namespaceName the visible name of the namespace
   * @param description the specific reason why it failed
   * @param cause the exception that caused this failure
   */
  public NamespaceNotEmptyException(String namespaceId, String namespaceName, String description,
      Throwable cause) {
    this(namespaceId, namespaceName, description);
    super.initCause(cause);
  }

  /**
   * @param e constructs an exception from a thrift exception
   */
  public NamespaceNotEmptyException(ThriftTableOperationException e) {
    this(e.getTableId(), e.getTableName(), e.getDescription(), e);
  }

  /**
   * @return the name of the namespace
   */
  public String getNamespaceName() {
    return namespace;
  }
}
| 9,991 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/ClientSideIteratorScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.clientImpl.ScannerOptions;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.iterators.IteratorAdapter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.IteratorBuilder;
import org.apache.accumulo.core.iteratorsImpl.IteratorConfigUtil;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;
/**
* A scanner that instantiates iterators on the client side instead of on the tablet server. This
* can be useful for testing iterators or in cases where you don't want iterators affecting the
* performance of tablet servers.
*
* <p>
* Suggested usage:
*
* <pre>
* <code>
* Scanner scanner = client.createScanner(tableName, authorizations);
* scanner = new ClientSideIteratorScanner(scanner);
* </code>
* </pre>
*
* <p>
* Iterators added to this scanner will be run in the client JVM. Separate scan iterators can be run
* on the server side and client side by adding iterators to the source scanner (which will execute
* server side) and to the client side scanner (which will execute client side).
*/
public class ClientSideIteratorScanner extends ScannerOptions implements Scanner {
  // Batch size pushed down to the wrapped scanner on each iterator() call.
  private int size;
  private Range range;
  private boolean isolated = false;
  private long readaheadThreshold = Constants.SCANNER_DEFAULT_READAHEAD_THRESHOLD;
  // Sampler config used only when deep-copying client-side iterators with sampling enabled;
  // see setIteratorSamplerConfiguration.
  private SamplerConfiguration iteratorSamplerConfig;

  /**
   * Minimal {@link IteratorEnvironment} handed to client-side iterators. Always reports the scan
   * scope and never a compaction, since these iterators run in the client JVM.
   */
  private class ClientSideIteratorEnvironment implements IteratorEnvironment {
    private SamplerConfiguration samplerConfig;
    private boolean sampleEnabled;

    ClientSideIteratorEnvironment(boolean sampleEnabled, SamplerConfiguration samplerConfig) {
      this.sampleEnabled = sampleEnabled;
      this.samplerConfig = samplerConfig;
    }

    @Override
    public IteratorScope getIteratorScope() {
      return IteratorScope.scan;
    }

    @Override
    public boolean isFullMajorCompaction() {
      return false;
    }

    @Override
    public boolean isUserCompaction() {
      return false;
    }

    @Override
    public Authorizations getAuthorizations() {
      // Delegates to the outer scanner, which in turn asks the wrapped source scanner.
      return ClientSideIteratorScanner.this.getAuthorizations();
    }

    @Override
    public IteratorEnvironment cloneWithSamplingEnabled() {
      return new ClientSideIteratorEnvironment(true, samplerConfig);
    }

    @Override
    public boolean isSamplingEnabled() {
      return sampleEnabled;
    }

    @Override
    public SamplerConfiguration getSamplerConfiguration() {
      return samplerConfig;
    }
  }

  /**
   * A class that wraps a Scanner in a SortedKeyValueIterator so that other accumulo iterators can
   * use it as a source.
   */
  private class ScannerTranslatorImpl implements SortedKeyValueIterator<Key,Value> {
    protected Scanner scanner;
    Iterator<Entry<Key,Value>> iter;
    // Current entry; null indicates the iterator is exhausted (hasTop() == false).
    Entry<Key,Value> top = null;
    private SamplerConfiguration samplerConfig;

    /**
     * Constructs an accumulo iterator from a scanner.
     *
     * @param scanner the scanner to iterate over
     */
    public ScannerTranslatorImpl(final Scanner scanner, SamplerConfiguration samplerConfig) {
      this.scanner = scanner;
      this.samplerConfig = samplerConfig;
    }

    @Override
    public void init(final SortedKeyValueIterator<Key,Value> source,
        final Map<String,String> options, final IteratorEnvironment env) throws IOException {
      // This translator is always the root source of the iterator stack; it is never
      // initialized on top of another iterator.
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean hasTop() {
      return top != null;
    }

    @Override
    public void next() throws IOException {
      if (iter.hasNext()) {
        top = iter.next();
      } else {
        top = null;
      }
    }

    @Override
    public void seek(final Range range, final Collection<ByteSequence> columnFamilies,
        final boolean inclusive) throws IOException {
      // A Scanner can only fetch (include) column families; an exclusive family set cannot be
      // expressed, so reject it.
      if (!inclusive && !columnFamilies.isEmpty()) {
        throw new IllegalArgumentException();
      }
      scanner.setRange(range);
      scanner.clearColumns();
      for (ByteSequence colf : columnFamilies) {
        scanner.fetchColumnFamily(new Text(colf.toArray()));
      }
      if (samplerConfig == null) {
        scanner.clearSamplerConfiguration();
      } else {
        scanner.setSamplerConfiguration(samplerConfig);
      }
      iter = scanner.iterator();
      // Prime the first entry so hasTop()/getTopKey() are valid immediately after seek().
      next();
    }

    @Override
    public Key getTopKey() {
      return top.getKey();
    }

    @Override
    public Value getTopValue() {
      return top.getValue();
    }

    @Override
    public SortedKeyValueIterator<Key,Value> deepCopy(final IteratorEnvironment env) {
      // NOTE(review): the deep copy shares the underlying Scanner instance — confirm callers do
      // not seek the copy and the original concurrently.
      return new ScannerTranslatorImpl(scanner,
          env.isSamplingEnabled() ? env.getSamplerConfiguration() : null);
    }
  }

  // The wrapped source scanner, adapted to the SortedKeyValueIterator interface.
  private ScannerTranslatorImpl smi;

  /**
   * Constructs a scanner that can execute client-side iterators.
   *
   * @param scanner the source scanner
   */
  public ClientSideIteratorScanner(final Scanner scanner) {
    // Snapshot the source scanner's current settings; retryTimeout and batchTimeout are
    // inherited fields (presumably from ScannerOptions — defined outside this file).
    smi = new ScannerTranslatorImpl(scanner, scanner.getSamplerConfiguration());
    this.range = scanner.getRange();
    this.size = scanner.getBatchSize();
    this.retryTimeout = scanner.getTimeout(MILLISECONDS);
    this.batchTimeout = scanner.getTimeout(MILLISECONDS);
    this.readaheadThreshold = scanner.getReadaheadThreshold();
    SamplerConfiguration samplerConfig = scanner.getSamplerConfiguration();
    if (samplerConfig != null) {
      setSamplerConfiguration(samplerConfig);
    }
  }

  /**
   * Sets the source Scanner.
   */
  public void setSource(final Scanner scanner) {
    smi = new ScannerTranslatorImpl(scanner, scanner.getSamplerConfiguration());
  }

  @Override
  public Iterator<Entry<Key,Value>> iterator() {
    // Push the current settings of this wrapper down to the source scanner before scanning.
    smi.scanner.setBatchSize(size);
    smi.scanner.setTimeout(retryTimeout, MILLISECONDS);
    smi.scanner.setBatchTimeout(batchTimeout, MILLISECONDS);
    smi.scanner.setReadaheadThreshold(readaheadThreshold);
    if (isolated) {
      smi.scanner.enableIsolation();
    } else {
      smi.scanner.disableIsolation();
    }
    smi.samplerConfig = getSamplerConfiguration();

    // Order configured iterators by priority; iterators added to this wrapper (via
    // serverSideIteratorList, inherited from ScannerOptions) are executed client side here.
    final TreeMap<Integer,IterInfo> tm = new TreeMap<>();
    for (IterInfo iterInfo : serverSideIteratorList) {
      tm.put(iterInfo.getPriority(), iterInfo);
    }
    SortedKeyValueIterator<Key,Value> skvi;
    try {
      IteratorEnvironment iterEnv = new ClientSideIteratorEnvironment(
          getSamplerConfiguration() != null, getIteratorSamplerConfigurationInternal());
      IteratorBuilder ib =
          IteratorBuilder.builder(tm.values()).opts(serverSideIteratorOptions).env(iterEnv).build();
      // Instantiate the iterator stack in this JVM, rooted at the scanner translator.
      skvi = IteratorConfigUtil.loadIterators(smi, ib);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }

    final Set<ByteSequence> colfs = new TreeSet<>();
    for (Column c : this.getFetchedColumns()) {
      colfs.add(new ArrayByteSequence(c.getColumnFamily()));
    }

    try {
      skvi.seek(range, colfs, true);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }

    return new IteratorAdapter(skvi);
  }

  @Override
  public Authorizations getAuthorizations() {
    return smi.scanner.getAuthorizations();
  }

  @Override
  public void setRange(final Range range) {
    this.range = range;
  }

  @Override
  public Range getRange() {
    return range;
  }

  @Override
  public void setBatchSize(final int size) {
    this.size = size;
  }

  @Override
  public int getBatchSize() {
    return size;
  }

  @Override
  public void enableIsolation() {
    this.isolated = true;
  }

  @Override
  public void disableIsolation() {
    this.isolated = false;
  }

  @Override
  public long getReadaheadThreshold() {
    return readaheadThreshold;
  }

  @Override
  public void setReadaheadThreshold(long batches) {
    if (batches < 0) {
      throw new IllegalArgumentException(
          "Number of batches before read-ahead must be non-negative");
    }
    this.readaheadThreshold = batches;
  }

  /**
   * Resolves the sampler configuration to use for the client-side iterator environment: the
   * scanner-level config wins when set, otherwise the iterator-level config; a conflict between
   * the two is an error.
   */
  private SamplerConfiguration getIteratorSamplerConfigurationInternal() {
    SamplerConfiguration scannerSamplerConfig = getSamplerConfiguration();
    if (scannerSamplerConfig != null) {
      if (iteratorSamplerConfig != null && !iteratorSamplerConfig.equals(scannerSamplerConfig)) {
        throw new IllegalStateException("Scanner and iterator sampler configuration differ");
      }
      return scannerSamplerConfig;
    }
    return iteratorSamplerConfig;
  }

  /**
   * This is provided for the case where no sampler configuration is set on the scanner, but there
   * is a need to create iterator deep copies that have sampling enabled. If sampler configuration
   * is set on the scanner, then this method does not need to be called in order to create deep
   * copies with sampling.
   *
   * <p>
   * Setting this differently than the scanners sampler configuration may cause exceptions.
   *
   * @since 1.8.0
   */
  public void setIteratorSamplerConfiguration(SamplerConfiguration sc) {
    requireNonNull(sc);
    this.iteratorSamplerConfig = sc;
  }

  /**
   * Clear any iterator sampler configuration.
   *
   * @since 1.8.0
   */
  public void clearIteratorSamplerConfiguration() {
    this.iteratorSamplerConfig = null;
  }

  /**
   * @return currently set iterator sampler configuration.
   *
   * @since 1.8.0
   */
  public SamplerConfiguration getIteratorSamplerConfiguration() {
    return iteratorSamplerConfig;
  }

  @Override
  public void close() {
    smi.scanner.close();
  }
}
| 9,992 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/BatchWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client;
import org.apache.accumulo.core.data.Mutation;
/**
* Send Mutations to a single Table in Accumulo.
* <p>
* When the user uses a client to create a BatchWriter, they specify how much memory and how many
* threads it should use. As the user adds mutations to the batch writer, it buffers them. Once the
* buffered mutations have used half of the user specified buffer, the mutations are dumped into the
* background to be written by a thread pool. If the user specified memory completely fills up, then
* writes are held. When a user calls flush, it does not return until all buffered mutations are
* written.
* <p>
* In the event that an MutationsRejectedException exception is thrown by one of the methods on a
* BatchWriter instance, the user should close the current instance and create a new instance.
*/
public interface BatchWriter extends AutoCloseable {

  /**
   * Queues one mutation to write.
   *
   * @param m the mutation to add
   * @throws MutationsRejectedException this could be thrown because current or previous mutations
   *         failed
   */
  void addMutation(Mutation m) throws MutationsRejectedException;

  /**
   * Queues several mutations to write.
   *
   * @param iterable allows adding any number of mutations iteratively
   * @throws MutationsRejectedException this could be thrown because current or previous mutations
   *         failed
   */
  void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException;

  /**
   * Send any buffered mutations to Accumulo immediately. Does not return until all buffered
   * mutations have been written (see the class javadoc).
   *
   * @throws MutationsRejectedException this could be thrown because current or previous mutations
   *         failed
   */
  void flush() throws MutationsRejectedException;

  /**
   * Flush and release any resources. Per the class javadoc, a writer that has thrown
   * MutationsRejectedException should be closed and replaced with a new instance.
   *
   * @throws MutationsRejectedException this could be thrown because current or previous mutations
   *         failed
   */
  @Override
  void close() throws MutationsRejectedException;
}
| 9,993 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security/SecurityErrorCode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client.security;
/**
 * Error codes describing why an Accumulo security operation failed.
 */
public enum SecurityErrorCode {
  // NOTE(review): these constants appear to mirror a thrift-defined error code enum; if ordinals
  // are used anywhere in serialization, new values must only be appended — confirm before
  // reordering or inserting.
  DEFAULT_SECURITY_ERROR,
  BAD_CREDENTIALS,
  PERMISSION_DENIED,
  USER_DOESNT_EXIST,
  CONNECTION_ERROR,
  USER_EXISTS,
  GRANT_INVALID,
  BAD_AUTHORIZATIONS,
  INVALID_INSTANCEID,
  TABLE_DOESNT_EXIST,
  UNSUPPORTED_OPERATION,
  INVALID_TOKEN,
  AUTHENTICATOR_FAILED,
  AUTHORIZOR_FAILED,
  PERMISSIONHANDLER_FAILED,
  TOKEN_EXPIRED,
  SERIALIZATION_ERROR,
  INSUFFICIENT_PROPERTIES,
  NAMESPACE_DOESNT_EXIST
}
| 9,994 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security/tokens/DelegationToken.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client.security.tokens;
import org.apache.accumulo.core.client.admin.SecurityOperations;
/**
* An {@link AuthenticationToken} that wraps a "Hadoop style" delegation token created by Accumulo.
* The intended scope of this token is when a KerberosToken cannot be used instead. The most common
* reason for this is within YARN jobs. The Kerberos credentials of the user are not passed over the
* wire to the job itself. The delegation token serves as a mechanism to obtain a transient shared
* secret with Accumulo using a {@link KerberosToken} and then run some task authenticating with
* that shared secret.
*
* <p>
* Obtain a delegation token by calling
* {@link SecurityOperations#getDelegationToken(org.apache.accumulo.core.client.admin.DelegationTokenConfig)}
*
* @since 1.7.0
*/
public interface DelegationToken extends AuthenticationToken {
  // Marker interface: adds no methods beyond AuthenticationToken. The concrete implementation
  // presumably lives in the clientImpl package (DelegationTokenImpl) — obtain instances via
  // SecurityOperations.getDelegationToken(...), not by implementing this directly.
}
| 9,995 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security/tokens/KerberosToken.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client.security.tokens;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Collections;
import java.util.Set;
import javax.security.auth.DestroyFailedException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
/**
* Authentication token for Kerberos authenticated clients
*
* @since 1.7.0
*/
public class KerberosToken implements AuthenticationToken {
public static final String CLASS_NAME = KerberosToken.class.getName();
private static final int VERSION = 1;
private String principal;
private File keytab;
/**
* Creates a token using the provided principal and the currently logged-in user via
* {@link UserGroupInformation}.
*
* This method expects the current user (as defined by
* {@link UserGroupInformation#getCurrentUser()} to be authenticated via Kerberos or as a Proxy
* (on top of another user). An {@link IllegalArgumentException} will be thrown for all other
* cases.
*
* @param principal The user that is logged in
* @throws IllegalArgumentException If the current user is not authentication via Kerberos or
* Proxy methods.
* @see UserGroupInformation#getCurrentUser()
* @see UserGroupInformation#getAuthenticationMethod()
*/
public KerberosToken(String principal) throws IOException {
this.principal = requireNonNull(principal);
validateAuthMethod(UserGroupInformation.getCurrentUser().getAuthenticationMethod());
}
static void validateAuthMethod(AuthenticationMethod authMethod) {
// There is also KERBEROS_SSL but that appears to be deprecated/OBE
checkArgument(
authMethod == AuthenticationMethod.KERBEROS || authMethod == AuthenticationMethod.PROXY,
"KerberosToken expects KERBEROS or PROXY authentication for the current "
+ "UserGroupInformation user. Saw " + authMethod);
}
/**
* Creates a Kerberos token for the specified principal using the provided keytab. The principal
* and keytab combination are verified by attempting a log in.
* <p>
* This constructor does not have any side effects.
*
* @param principal The Kerberos principal
* @param keytab A keytab file containing the principal's credentials.
*/
public KerberosToken(String principal, File keytab) throws IOException {
this.principal = requireNonNull(principal, "Principal was null");
this.keytab = requireNonNull(keytab, "Keytab was null");
checkArgument(keytab.exists() && keytab.isFile(), "Keytab was not a normal file");
}
/**
* Creates a token using the login user as returned by
* {@link UserGroupInformation#getCurrentUser()}
*
* @throws IOException If the current logged in user cannot be computed.
*/
public KerberosToken() throws IOException {
this(UserGroupInformation.getCurrentUser().getUserName());
}
@Override
public KerberosToken clone() {
try {
KerberosToken clone = (KerberosToken) super.clone();
clone.principal = principal;
clone.keytab = keytab == null ? keytab : keytab.getCanonicalFile();
return clone;
} catch (CloneNotSupportedException e) {
throw new IllegalStateException(e);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof KerberosToken)) {
return false;
}
KerberosToken other = (KerberosToken) obj;
return principal.equals(other.principal);
}
/**
* The identity of the user to which this token belongs to according to Kerberos
*
* @return The principal
*/
public String getPrincipal() {
return principal;
}
/**
* The keytab file used to perform Kerberos login. Optional, may be null.
*/
public File getKeytab() {
return keytab;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(VERSION);
}
@Override
public void readFields(DataInput in) throws IOException {
int actualVersion = in.readInt();
if (actualVersion != VERSION) {
throw new IOException("Did not find expected version in serialized KerberosToken");
}
}
@Override
public synchronized void destroy() throws DestroyFailedException {
principal = null;
}
@Override
public boolean isDestroyed() {
return principal == null;
}
  @Override
  public void init(Properties properties) {
    // Intentionally a no-op: this token exposes no configurable properties
    // (getProperties() returns an empty set).
  }
  @Override
  public Set<TokenProperty> getProperties() {
    // No configurable properties; pairs with the no-op init(Properties).
    return Collections.emptySet();
  }
@Override
public int hashCode() {
return principal.hashCode();
}
}
| 9,996 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security/tokens/AuthenticationToken.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client.security.tokens;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.security.auth.DestroyFailedException;
import javax.security.auth.Destroyable;
import org.apache.hadoop.io.Writable;
/**
* @since 1.5.0
*/
public interface AuthenticationToken extends Writable, Destroyable, Cloneable {
/**
* A utility class to serialize/deserialize {@link AuthenticationToken} objects.<br>
* Unfortunately, these methods are provided in an inner-class, to avoid breaking the interface
* API.
*
* @since 1.6.0
*/
final class AuthenticationTokenSerializer {
/**
* A convenience method to create tokens from serialized bytes, created by
* {@link #serialize(AuthenticationToken)}
* <p>
* The specified tokenType will be instantiated, and used to deserialize the decoded bytes. The
* resulting object will then be returned to the caller.
*
* @param tokenType the token class to use to deserialize the bytes
* @param tokenBytes the token-specific serialized bytes
* @return an {@link AuthenticationToken} instance of the type specified by tokenType
* @see #serialize(AuthenticationToken)
*/
public static <T extends AuthenticationToken> T deserialize(Class<T> tokenType,
byte[] tokenBytes) {
T type = null;
try {
type = tokenType.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new IllegalArgumentException("Cannot instantiate " + tokenType.getName(), e);
}
ByteArrayInputStream bais = new ByteArrayInputStream(tokenBytes);
DataInputStream in = new DataInputStream(bais);
try {
type.readFields(in);
} catch (IOException e) {
throw new IllegalArgumentException(
"Cannot deserialize provided byte array as class " + tokenType.getName(), e);
}
try {
in.close();
} catch (IOException e) {
throw new IllegalStateException("Shouldn't happen", e);
}
return type;
}
/**
* An alternate version of {@link #deserialize(Class, byte[])} that accepts a token class name
* rather than a token class.
*
* @param tokenClassName the fully-qualified class name to be returned
* @see #serialize(AuthenticationToken)
*/
public static AuthenticationToken deserialize(String tokenClassName, byte[] tokenBytes) {
try {
@SuppressWarnings("unchecked")
var tokenType = (Class<? extends AuthenticationToken>) Class.forName(tokenClassName);
return deserialize(tokenType, tokenBytes);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Class not available " + tokenClassName, e);
}
}
/**
* A convenience method to serialize tokens.
* <p>
* The provided {@link AuthenticationToken} will be serialized to bytes by its own
* implementation and returned to the caller.
*
* @param token the token to serialize
* @return a serialized representation of the provided {@link AuthenticationToken}
* @see #deserialize(Class, byte[])
*/
public static byte[] serialize(AuthenticationToken token) {
try (var baos = new ByteArrayOutputStream(); var out = new DataOutputStream(baos)) {
token.write(out);
return baos.toByteArray();
} catch (IOException e) {
throw new UncheckedIOException("Bug found in serialization code", e);
}
}
}
class Properties implements Destroyable, Map<String,char[]> {
private boolean destroyed = false;
private HashMap<String,char[]> map = new HashMap<>();
private void checkDestroyed() {
if (destroyed) {
throw new IllegalStateException();
}
}
public char[] put(String key, CharSequence value) {
checkDestroyed();
char[] toPut = new char[value.length()];
for (int i = 0; i < value.length(); i++) {
toPut[i] = value.charAt(i);
}
return map.put(key, toPut);
}
public void putAllStrings(Map<String,? extends CharSequence> map) {
checkDestroyed();
for (Map.Entry<String,? extends CharSequence> entry : map.entrySet()) {
put(entry.getKey(), entry.getValue());
}
}
@Override
public void destroy() throws DestroyFailedException {
for (String key : this.keySet()) {
char[] val = this.get(key);
Arrays.fill(val, (char) 0);
}
this.clear();
destroyed = true;
}
@Override
public boolean isDestroyed() {
return destroyed;
}
@Override
public int size() {
checkDestroyed();
return map.size();
}
@Override
public boolean isEmpty() {
checkDestroyed();
return map.isEmpty();
}
@Override
public boolean containsKey(Object key) {
checkDestroyed();
String k = (String) key;
return map.containsKey(k);
}
@Override
public boolean containsValue(Object value) {
checkDestroyed();
char[] v = (char[]) value;
return map.containsValue(v);
}
@Override
public char[] get(Object key) {
checkDestroyed();
String k = (String) key;
return map.get(k);
}
@Override
public char[] put(String key, char[] value) {
checkDestroyed();
return map.put(key, value);
}
@Override
public char[] remove(Object key) {
checkDestroyed();
String k = (String) key;
return map.remove(k);
}
@Override
public void putAll(Map<? extends String,? extends char[]> m) {
checkDestroyed();
map.putAll(m);
}
@Override
public void clear() {
checkDestroyed();
map.clear();
}
@Override
public Set<String> keySet() {
checkDestroyed();
return map.keySet();
}
@Override
public Collection<char[]> values() {
checkDestroyed();
return map.values();
}
@Override
public Set<Map.Entry<String,char[]>> entrySet() {
checkDestroyed();
return map.entrySet();
}
}
class TokenProperty implements Comparable<TokenProperty> {
private String key, description;
private boolean masked;
public TokenProperty(String name, String description, boolean mask) {
this.key = name;
this.description = description;
this.masked = mask;
}
@Override
public String toString() {
return this.key + " - " + description;
}
public String getKey() {
return this.key;
}
public String getDescription() {
return this.description;
}
public boolean getMask() {
return this.masked;
}
@Override
public int hashCode() {
return key.hashCode();
}
@Override
public boolean equals(Object o) {
if (o instanceof TokenProperty) {
return ((TokenProperty) o).key.equals(key);
}
return false;
}
@Override
public int compareTo(TokenProperty o) {
return key.compareTo(o.key);
}
}
void init(Properties properties);
Set<TokenProperty> getProperties();
AuthenticationToken clone();
}
| 9,997 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client.security.tokens;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.security.auth.DestroyFailedException;
import org.apache.accumulo.core.util.ByteBufferUtil;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
 * An {@link AuthenticationToken} backed by an opaque byte-array password.
 *
 * @since 1.5.0
 */
public class PasswordToken implements AuthenticationToken {
  // Password bytes; null after destroy() or after reading a serialized null token.
  private byte[] password = null;

  /**
   * Returns a defensive copy of the password bytes; callers may modify the result freely.
   */
  public byte[] getPassword() {
    return Arrays.copyOf(password, password.length);
  }

  /**
   * Constructor for use with {@link Writable}. Call {@link #readFields(DataInput)}.
   */
  public PasswordToken() {
    password = new byte[0];
  }

  /**
   * Constructs a token from a copy of the password. Destroying the argument after construction will
   * not destroy the copy in this token, and destroying this token will only destroy the copy held
   * inside this token, not the argument.
   *
   * Password tokens created with this constructor will store the password as UTF-8 bytes.
   */
  public PasswordToken(CharSequence password) {
    setPassword(CharBuffer.wrap(password));
  }

  /**
   * Constructs a token from a copy of the password. Destroying the argument after construction will
   * not destroy the copy in this token, and destroying this token will only destroy the copy held
   * inside this token, not the argument.
   */
  public PasswordToken(byte[] password) {
    this.password = Arrays.copyOf(password, password.length);
  }

  /**
   * Constructs a token from a copy of the password. Destroying the argument after construction will
   * not destroy the copy in this token, and destroying this token will only destroy the copy held
   * inside this token, not the argument.
   */
  public PasswordToken(ByteBuffer password) {
    this.password = ByteBufferUtil.toBytes(password);
  }

  @Override
  public void readFields(DataInput arg0) throws IOException {
    int version = arg0.readInt();
    // -1 is null, consistent with legacy format; legacy format length must be >= -1
    // so, use -2 as a magic number to indicate the new format
    if (version == -1) {
      password = null;
    } else if (version == -2) {
      // new format: explicit length followed by raw password bytes
      byte[] passwordTmp = new byte[arg0.readInt()];
      arg0.readFully(passwordTmp);
      password = passwordTmp;
    } else {
      // legacy format; should avoid reading/writing compressed byte arrays using WritableUtils,
      // because GZip is expensive and it doesn't actually compress passwords very well
      AtomicBoolean calledFirstReadInt = new AtomicBoolean(false);
      DataInput wrapped = (DataInput) Proxy.newProxyInstance(DataInput.class.getClassLoader(),
          arg0.getClass().getInterfaces(), (obj, method, args) -> {
            // wrap the original DataInput in order to simulate replacing the integer that was
            // previously read and then not used back into the input, after it didn't match -2
            if (!calledFirstReadInt.get() && method.getName().equals("readInt")) {
              calledFirstReadInt.set(true);
              return version;
            }
            try {
              return method.invoke(arg0, args);
            } catch (InvocationTargetException e) {
              // unwrap so the caller sees the original IOException, not a reflection wrapper
              throw e.getCause();
            }
          });
      password = WritableUtils.readCompressedByteArray(wrapped);
    }
  }

  @Override
  public void write(DataOutput arg0) throws IOException {
    if (arg0 == null) {
      throw new IOException("DataOutput was null");
    }
    if (password == null) {
      // -1 encodes a null password, consistent with the legacy format (see readFields)
      arg0.writeInt(-1);
      return;
    }
    arg0.writeInt(-2); // magic number marking the new format
    arg0.writeInt(password.length);
    arg0.write(password);
  }

  @Override
  public void destroy() throws DestroyFailedException {
    // Guard makes destroy() idempotent: previously a second call threw
    // NullPointerException from Arrays.fill(null, ...) once the password was cleared.
    if (password != null) {
      Arrays.fill(password, (byte) 0x00);
      password = null;
    }
  }

  @Override
  public boolean isDestroyed() {
    return password == null;
  }

  @Override
  public int hashCode() {
    // Arrays.hashCode(null) == 0, so a destroyed token hashes safely.
    return Arrays.hashCode(password);
  }

  @Override
  public boolean equals(Object obj) {
    // Instances of PasswordToken should only be considered equal if they are of the same type.
    // This check is done here to ensure that this class is equal to the class of the object being
    // checked.
    return this == obj || (obj != null && getClass().equals(obj.getClass())
        && Arrays.equals(password, ((PasswordToken) obj).password));
  }

  @Override
  public PasswordToken clone() {
    try {
      PasswordToken clone = (PasswordToken) super.clone();
      // deep-copy so destroying one token does not zero the other's password
      clone.password = Arrays.copyOf(password, password.length);
      return clone;
    } catch (CloneNotSupportedException e) {
      throw new IllegalStateException(e);
    }
  }

  protected void setPassword(byte[] password) {
    this.password = Arrays.copyOf(password, password.length);
  }

  protected void setPassword(CharBuffer charBuffer) {
    // encode() kicks back a C-string, which is not compatible with the old passwording system
    ByteBuffer bb = UTF_8.encode(charBuffer);
    // create array using byte buffer length
    this.password = new byte[bb.remaining()];
    bb.get(this.password);
    if (!bb.isReadOnly()) {
      // clear byte buffer so the plaintext does not linger in the temporary encoding
      bb.rewind();
      while (bb.remaining() > 0) {
        bb.put((byte) 0);
      }
    }
  }

  @Override
  public void init(Properties properties) {
    if (properties.containsKey("password")) {
      setPassword(CharBuffer.wrap(properties.get("password")));
    } else {
      throw new IllegalArgumentException("Missing 'password' property");
    }
  }

  @Override
  public Set<TokenProperty> getProperties() {
    Set<TokenProperty> internal = new LinkedHashSet<>();
    internal.add(new TokenProperty("password", "the password for the principal", true));
    return internal;
  }
}
| 9,998 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/client/security/tokens/NullToken.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.client.security.tokens;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import javax.security.auth.DestroyFailedException;
/**
 * An authentication token carrying no credential information at all.
 *
 * @since 1.5.0
 */
public class NullToken implements AuthenticationToken {

  /** Nothing to read: this token has no serialized state. */
  @Override
  public void readFields(DataInput in) throws IOException {}

  /** Nothing to write: this token has no serialized state. */
  @Override
  public void write(DataOutput out) throws IOException {}

  /** Nothing to erase, so destruction is a no-op. */
  @Override
  public void destroy() throws DestroyFailedException {}

  /** Always reports false; there is no sensitive state held here. */
  @Override
  public boolean isDestroyed() {
    return false;
  }

  /** No properties are consumed; pairs with the empty set from getProperties(). */
  @Override
  public void init(Properties properties) {}

  @Override
  public Set<TokenProperty> getProperties() {
    return Collections.emptySet();
  }

  @Override
  public NullToken clone() {
    try {
      return (NullToken) super.clone();
    } catch (CloneNotSupportedException e) {
      throw new AssertionError("Inconceivable", e);
    }
  }

  /** Any two NullToken instances are interchangeable, hence equal. */
  @Override
  public boolean equals(Object obj) {
    return obj instanceof NullToken;
  }

  @Override
  public int hashCode() {
    return 0;
  }
}
| 9,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.