index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/scan/SimpleScanDispatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.spi.scan.ScanDispatch.CacheUsage;
/**
* If no options are given, then this will default to an executor named {@code default} and
* {@link CacheUsage#TABLE} for index and data cache. This dispatcher supports the following
* options.
*
* <ul>
* <li>{@code table.scan.dispatcher.opts.executor=<scan executor name>} : dispatches all scans to
* the named executor.</li>
* <li>{@code table.scan.dispatcher.opts.multi_executor=<scan executor name>} : dispatches batch
* scans to the named executor.</li>
* <li>{@code table.scan.dispatcher.opts.single_executor=<scan executor name>} : dispatches regular
* scans to the named executor.</li>
* <li>{@code table.scan.dispatcher.opts.executor.<type>=<scan executor name>} : dispatches scans
* that set the hint {@code scan_type=<type>} to the named executor. If this setting matches then it
* takes precedence over all other settings. See {@link ScannerBase#setExecutionHints(Map)}</li>
* <li>{@code table.scan.dispatcher.opts.cacheUsage.<type>[.index|.data]=enabled|disabled|opportunistic|table}
* : for scans that set the hint {@code scan_type=<type>} determines how the scan will use cache.
* </ul>
*
* The {@code multi_executor} and {@code single_executor} options override the {@code executor}
* option.
*/
public class SimpleScanDispatcher implements ScanDispatcher {

  private static final String EXECUTOR_PREFIX = "executor.";
  private static final Set<String> VALID_OPTS =
      Set.of("executor", "multi_executor", "single_executor");

  // Matches cache options of the form cacheUsage.<type> or cacheUsage.<type>.(index|data).
  // group(1) is the hint scan type; group(3) is the optional cache type ("index" or "data").
  private static final Pattern CACHE_PATTERN = Pattern.compile("cacheUsage[.](\\w+)([.](index|data))?");

  public static final String DEFAULT_SCAN_EXECUTOR_NAME = "default";

  // All dispatch objects are precomputed in init() so dispatch() does no parsing or allocation.
  private ScanDispatch singleDispatch;
  private ScanDispatch multiDispatch;
  private Map<String,Map<ScanInfo.Type,ScanDispatch>> hintDispatch;

  /**
   * Parses the dispatcher options and precomputes every {@link ScanDispatch} that could be
   * returned by {@link #dispatch(DispatchParameters)}.
   *
   * @throws IllegalArgumentException if an option key is not recognized
   */
  @Override
  public void init(InitParameters params) {
    Map<String,String> options = params.getOptions();

    Map<String,CacheUsage> indexCacheUsage = new HashMap<>();
    Map<String,CacheUsage> dataCacheUsage = new HashMap<>();
    Map<String,String> scanExecutors = new HashMap<>();
    Set<String> hintScanTypes = new HashSet<>();

    options.forEach((k, v) -> {
      Matcher cacheMatcher = CACHE_PATTERN.matcher(k);
      if (k.startsWith(EXECUTOR_PREFIX)) {
        // executor.<type>=<executor name> : per-scan-type executor mapping
        String hintScanType = k.substring(EXECUTOR_PREFIX.length());
        scanExecutors.put(hintScanType, v);
        hintScanTypes.add(hintScanType);
      } else if (cacheMatcher.matches()) {
        String hintScanType = cacheMatcher.group(1);
        // Use a fixed locale so enum parsing is unaffected by the default locale (e.g. the
        // Turkish dotless-i problem with values like "disabled" or "opportunistic").
        CacheUsage usage = CacheUsage.valueOf(v.toUpperCase(Locale.ROOT));
        String cacheType = cacheMatcher.group(3);
        hintScanTypes.add(hintScanType);
        if ("index".equals(cacheType)) {
          indexCacheUsage.put(hintScanType, usage);
        } else if ("data".equals(cacheType)) {
          dataCacheUsage.put(hintScanType, usage);
        } else {
          // No ".index" or ".data" suffix, so the setting applies to both caches.
          indexCacheUsage.put(hintScanType, usage);
          dataCacheUsage.put(hintScanType, usage);
        }
      } else if (!VALID_OPTS.contains(k)) {
        throw new IllegalArgumentException("Invalid option " + k);
      }
    });

    // This method pre-computes all possible scan dispatch objects that could ever be needed.
    // This is done to make the dispatch method more efficient. If the number of config permutations
    // grows, this approach may have to be abandoned. For now its tractable.
    ScanDispatch baseDispatch = Optional.ofNullable(options.get("executor"))
        .map(name -> ScanDispatch.builder().setExecutorName(name).build())
        .orElse(DefaultScanDispatch.DEFAULT_SCAN_DISPATCH);

    // single_executor and multi_executor, when set, override the generic executor option.
    singleDispatch = Optional.ofNullable(options.get("single_executor"))
        .map(name -> ScanDispatch.builder().setExecutorName(name).build()).orElse(baseDispatch);
    multiDispatch = Optional.ofNullable(options.get("multi_executor"))
        .map(name -> ScanDispatch.builder().setExecutorName(name).build()).orElse(baseDispatch);

    hintDispatch = hintScanTypes.stream()
        .collect(Collectors.toUnmodifiableMap(Function.identity(), hintScanType -> {
          EnumMap<ScanInfo.Type,ScanDispatch> precomputed = new EnumMap<>(ScanInfo.Type.class);

          CacheUsage iCacheUsage = indexCacheUsage.getOrDefault(hintScanType, CacheUsage.TABLE);
          CacheUsage dCacheUsage = dataCacheUsage.getOrDefault(hintScanType, CacheUsage.TABLE);

          precomputed.put(ScanInfo.Type.SINGLE,
              ScanDispatch.builder()
                  .setExecutorName(
                      scanExecutors.getOrDefault(hintScanType, singleDispatch.getExecutorName()))
                  .setIndexCacheUsage(iCacheUsage).setDataCacheUsage(dCacheUsage).build());
          precomputed.put(ScanInfo.Type.MULTI,
              ScanDispatch.builder()
                  .setExecutorName(
                      scanExecutors.getOrDefault(hintScanType, multiDispatch.getExecutorName()))
                  .setIndexCacheUsage(iCacheUsage).setDataCacheUsage(dCacheUsage).build());

          return precomputed;
        }));
  }

  /**
   * Returns the precomputed dispatch for the scan. A matching {@code scan_type} execution hint
   * takes precedence; otherwise the dispatch is chosen by the scan's type (SINGLE vs MULTI).
   */
  @Override
  public ScanDispatch dispatch(DispatchParameters params) {
    ScanInfo scanInfo = params.getScanInfo();

    if (!hintDispatch.isEmpty()) {
      String hintScanType = scanInfo.getExecutionHints().get("scan_type");
      if (hintScanType != null) {
        var precomputedDispatch = hintDispatch.get(hintScanType);
        if (precomputedDispatch != null) {
          return precomputedDispatch.get(scanInfo.getScanType());
        }
      }
    }

    switch (scanInfo.getScanType()) {
      case MULTI:
        return multiDispatch;
      case SINGLE:
        return singleDispatch;
      default:
        throw new IllegalArgumentException("Unexpected scan type " + scanInfo.getScanType());
    }
  }
}
| 9,800 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/common/ServiceEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.common;
import org.apache.accumulo.core.client.PluginEnvironment;
import org.apache.accumulo.core.data.TableId;
/**
* This interface exposes Accumulo system level information to plugins in a stable manner. The
* purpose of this interface is to insulate plugins from internal refactorings and changes to
* Accumulo.
*
* <p>
* Having this allows adding methods to the SPI which are not desired in PluginEnvironment which is
* public API.
*
* @since 2.0.0
*/
public interface ServiceEnvironment extends PluginEnvironment {

  /**
   * An SPI-level view of configuration. Extends the public-API configuration interface so that
   * SPI-only methods can be added here without appearing in the public API.
   *
   * @since 2.0.0
   */
  interface Configuration extends PluginEnvironment.Configuration {

  }

  /**
   * @return A view of Accumulo's system level configuration. This is backed by system level config
   *         in zookeeper, which falls back to site configuration, which falls back to the default
   *         configuration.
   */
  @Override
  Configuration getConfiguration();

  /**
   * @param tableId the id of the table whose configuration view is requested
   * @return a view of a table's configuration. When requesting properties that start with
   *         {@code table.} the returned configuration may give different values for different
   *         tables. For other properties the returned configuration will return the same value as
   *         {@link #getConfiguration()}.
   *
   */
  @Override
  Configuration getConfiguration(TableId tableId);
}
| 9,801 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/common/Stats.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.common;
/**
* @since 2.0.0
*/
/**
 * Read-only summary statistics over a series of long-valued data points.
 *
 * @since 2.0.0
 */
public interface Stats {

  /**
   * @return the minimum data point seen, or 0 if no data was seen
   */
  long min();

  /**
   * @return the maximum data point seen, or 0 if no data was seen
   */
  long max();

  /**
   * @return the mean of the data points seen, or {@link Double#NaN} if no data was seen
   */
  double mean();

  /**
   * @return the sum of the data points seen, or 0 if no data was seen
   */
  long sum();

  /**
   * @return the number of data points seen
   */
  long num();
}
| 9,802 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/common/ContextClassLoaderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.common;
/**
* The ContextClassLoaderFactory provides a mechanism for various Accumulo components to use a
* custom ClassLoader for specific contexts, such as loading table iterators. This factory is
* initialized at startup and supplies ClassLoaders when provided a context.
*
* <p>
* This factory can be configured using the <code>general.context.class.loader.factory</code>
* property. All implementations of this factory must have a default (no-argument) public
* constructor.
*
* <p>
* A default implementation is provided for Accumulo 2.x to retain existing context class loader
* behavior based on per-table configuration. However, after Accumulo 2.x, the default is expected
* to change to a simpler implementation, and users will need to provide their own implementation to
* support advanced context class loading features. Some implementations may be maintained by the
* Accumulo developers in a separate package. Check the Accumulo website or contact the developers
* for more details on the status of these implementations.
*
* <p>
* Because this factory is expected to be instantiated early in the application startup process,
* configuration is expected to be provided within the environment (such as in Java system
* properties or process environment variables), and is implementation-specific. However, some
* limited environment is also available so implementations can have access to Accumulo's own system
* configuration.
*
* @since 2.1.0
*/
public interface ContextClassLoaderFactory {

  /**
   * Pass the service environment to allow for additional class loader configuration.
   * Implementations that need no configuration can rely on this default no-op.
   *
   * @param env the class loader environment
   */
  default void init(ContextClassLoaderEnvironment env) {}

  /**
   * Get the class loader for the given context. Callers should not cache the ClassLoader result as
   * it may change if/when the ClassLoader reloads. Implementations should throw a RuntimeException
   * of some type (such as IllegalArgumentException) if the provided context is not supported or
   * fails to be constructed.
   *
   * @param context the name of the context that represents a class loader that is managed by this
   *        factory. Currently, Accumulo will only call this method for non-null and non-empty
   *        context. For empty or null context, Accumulo will use the system classloader without
   *        consulting this plugin.
   * @return the class loader for the given context
   */
  ClassLoader getClassLoader(String context);
}
| 9,803 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/common/IteratorConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.common;
import java.util.Map;
/**
* Provides information about a configured Accumulo Iterator
*
* @since 2.0.0
*/
public interface IteratorConfiguration {

  /**
   * @return the fully qualified class name of the configured iterator
   */
  String getIteratorClass();

  /**
   * @return the name under which the iterator was configured
   */
  String getName();

  /**
   * @return the priority at which the iterator is configured
   */
  int getPriority();

  /**
   * @return the options configured for the iterator
   */
  Map<String,String> getOptions();
}
| 9,804 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/common/ContextClassLoaderEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.common;
/**
* The environment provided to the context class loader factory for its use
*
* @since 2.1.1
*/
public interface ContextClassLoaderEnvironment {

  /**
   * Get the service environment configuration, giving the factory read access to Accumulo's own
   * system configuration.
   *
   * @return The configuration
   */
  ServiceEnvironment.Configuration getConfiguration();
}
| 9,805 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/TableLoadBalancer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.manager.balancer.AssignmentParamsImpl;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @since 2.1.0
*/
public class TableLoadBalancer implements TabletBalancer {

  private static final Logger log = LoggerFactory.getLogger(TableLoadBalancer.class);

  // Environment handed to this balancer and to every per-table delegate balancer it creates.
  protected BalancerEnvironment environment;

  // Cache of the delegate balancer currently in use for each table. An entry is replaced when the
  // table's configured balancer class changes. Package-private visibility is retained; callers in
  // this package may depend on it.
  Map<TableId,TabletBalancer> perTableBalancers = new HashMap<>();

  @Override
  public void init(BalancerEnvironment balancerEnvironment) {
    this.environment = balancerEnvironment;
  }

  // Loads clazzName using the table's classloader context and constructs it via the
  // single-argument TableId constructor. The returned balancer is NOT yet initialized; callers
  // must invoke init() on it.
  private TabletBalancer constructNewBalancerForTable(String clazzName, TableId tableId)
      throws Exception {
    String context = environment.tableContext(tableId);
    Class<? extends TabletBalancer> clazz =
        ClassLoaderUtil.loadClass(context, clazzName, TabletBalancer.class);
    Constructor<? extends TabletBalancer> constructor = clazz.getConstructor(TableId.class);
    return constructor.newInstance(tableId);
  }

  // Returns the configured balancer class name for an online table, or null when the table is not
  // online (the caller then falls back to SimpleLoadBalancer).
  protected String getLoadBalancerClassNameForTable(TableId table) {
    if (environment.isTableOnline(table)) {
      return environment.getConfiguration(table).get(Property.TABLE_LOAD_BALANCER.getKey());
    }
    return null;
  }

  // Returns the delegate balancer for the table, creating or replacing it as needed.
  // NOTE: statement ordering here is deliberate — on a class change the new balancer is
  // constructed, cached, then initialized; if construction fails, the previously cached balancer
  // is kept and returned (best-effort fallback, with a warning logged).
  protected TabletBalancer getBalancerForTable(TableId tableId) {
    TabletBalancer balancer = perTableBalancers.get(tableId);

    String clazzName = getLoadBalancerClassNameForTable(tableId);

    if (clazzName == null) {
      clazzName = SimpleLoadBalancer.class.getName();
    }
    if (balancer != null) {
      if (!clazzName.equals(balancer.getClass().getName())) {
        // the balancer class for this table does not match the class specified in the configuration
        try {
          balancer = constructNewBalancerForTable(clazzName, tableId);
          perTableBalancers.put(tableId, balancer);
          balancer.init(environment);
          log.info("Loaded new class {} for table {}", clazzName, tableId);
        } catch (Exception e) {
          // keep using the previously cached balancer; 'balancer' was not reassigned
          log.warn("Failed to load table balancer class {} for table {}", clazzName, tableId, e);
        }
      }
    }
    if (balancer == null) {
      try {
        balancer = constructNewBalancerForTable(clazzName, tableId);
        log.info("Loaded class {} for table {}", clazzName, tableId);
      } catch (Exception e) {
        log.warn("Failed to load table balancer class {} for table {}", clazzName, tableId, e);
      }

      if (balancer == null) {
        // final fallback: the default balancer always works
        log.info("Using balancer {} for table {}", SimpleLoadBalancer.class.getName(), tableId);
        balancer = new SimpleLoadBalancer(tableId);
      }
      perTableBalancers.put(tableId, balancer);
      balancer.init(environment);
    }
    return balancer;
  }

  // Groups the unassigned tablets by table and delegates each group to that table's balancer.
  @Override
  public void getAssignments(AssignmentParameters params) {
    // separate the unassigned into tables
    Map<TableId,Map<TabletId,TabletServerId>> groupedUnassigned = new HashMap<>();
    params.unassignedTablets().forEach((tid, lastTserver) -> groupedUnassigned
        .computeIfAbsent(tid.getTable(), k -> new HashMap<>()).put(tid, lastTserver));
    for (Entry<TableId,Map<TabletId,TabletServerId>> e : groupedUnassigned.entrySet()) {
      Map<TabletId,TabletServerId> newAssignments = new HashMap<>();
      getBalancerForTable(e.getKey()).getAssignments(
          new AssignmentParamsImpl(params.currentStatus(), e.getValue(), newAssignments));
      newAssignments.forEach(params::addAssignment);
    }
  }

  // Balances every table via its delegate and returns the smallest requested re-balance delay,
  // capped at 5 seconds.
  @Override
  public long balance(BalanceParameters params) {
    long minBalanceTime = 5_000;
    // Iterate over the tables and balance each of them
    for (TableId tableId : environment.getTableIdMap().values()) {
      ArrayList<TabletMigration> newMigrations = new ArrayList<>();
      long tableBalanceTime = getBalancerForTable(tableId).balance(
          new BalanceParamsImpl(params.currentStatus(), params.currentMigrations(), newMigrations));
      if (tableBalanceTime < minBalanceTime) {
        minBalanceTime = tableBalanceTime;
      }
      params.migrationsOut().addAll(newMigrations);
    }
    return minBalanceTime;
  }
}
| 9,806 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/TabletBalancer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
/**
* This class is responsible for managing the distribution of tablets throughout an Accumulo
* cluster. In most cases, users will want a balancer implementation which ensures a uniform
* distribution of tablets, so that no individual tablet server is handling significantly more work
* than any other.
*
* <p>
* Implementations may wish to store configuration in Accumulo's system configuration using the
* {@link Property#GENERAL_ARBITRARY_PROP_PREFIX}. They may also benefit from using per-table
* configuration using {@link Property#TABLE_ARBITRARY_PROP_PREFIX}.
*
* @since 2.1.0
*/
public interface TabletBalancer {

  /**
   * An interface for grouping parameters required for the balancer to assign unassigned tablets.
   * This interface allows for evolution of the parameter set without changing the balancer's method
   * signature.
   *
   * @since 2.1.0
   */
  interface AssignmentParameters {
    /**
     * @return the current status for all tablet servers (read-only)
     */
    SortedMap<TabletServerId,TServerStatus> currentStatus();

    /**
     * @return the tablets that need to be assigned, mapped to their previous known location
     *         (read-only)
     */
    Map<TabletId,TabletServerId> unassignedTablets();

    /**
     * Assigns {@code tabletId} to {@code tabletServerId}.
     *
     * @param tabletId the tablet being assigned
     * @param tabletServerId the server the tablet is assigned to
     */
    void addAssignment(TabletId tabletId, TabletServerId tabletServerId);
  }

  /**
   * An interface for grouping parameters required for the balancer to balance tablets. This
   * interface allows for evolution of the parameter set without changing the balancer's method
   * signature.
   *
   * @since 2.1.0
   */
  interface BalanceParameters {
    /**
     * @return the current status for all tablet servers (read-only)
     */
    SortedMap<TabletServerId,TServerStatus> currentStatus();

    /**
     * @return the migrations that are currently in progress (read-only)
     */
    Set<TabletId> currentMigrations();

    /**
     * @return a write-only map for storing new assignments made by the balancer. It is important
     *         that any tablets found in {@link #currentMigrations()} are not included in the output
     *         migrations.
     */
    List<TabletMigration> migrationsOut();
  }

  /**
   * Initialize the TabletBalancer. This gives the balancer the opportunity to read the
   * configuration.
   *
   * @param balancerEnvironment the environment providing configuration and cluster information
   */
  void init(BalancerEnvironment balancerEnvironment);

  /**
   * Assign tablets to tablet servers. This method is called whenever the manager finds tablets that
   * are unassigned.
   */
  void getAssignments(AssignmentParameters params);

  /**
   * Ask the balancer if any migrations are necessary.
   *
   * If the balancer is going to self-abort due to some environmental constraint (e.g. it requires
   * some minimum number of tservers, or a maximum number of outstanding migrations), it should
   * issue a log message to alert operators. The message should be at WARN normally and at ERROR if
   * the balancer knows that the problem can not self correct. It should not issue these messages
   * more than once a minute. This method will not be called when there are unassigned tablets.
   *
   * @return the time, in milliseconds, to wait before re-balancing.
   */
  long balance(BalanceParameters params);
}
| 9,807 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/RegexGroupBalancer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.hadoop.io.Text;
/**
* A {@link GroupBalancer} that groups tablets using a configurable regex. To use this balancer
* configure the following settings for your table then configure this balancer for your table.
*
* <ul>
* <li>Set {@code table.custom.balancer.group.regex.pattern} to a regular expression. This regular
* expression must have one group. The regex is applied to the tablet end row and whatever the regex
* group matches is used as the group. For example with a regex of {@code (\d\d).*} and an end row
* of {@code 12abc}, the group for the tablet would be {@code 12}.
* <li>Set {@code table.custom.balancer.group.regex.default} to a default group. This group is
* returned for the last tablet in the table and tablets for which the regex does not match.
* <li>Optionally set {@code table.custom.balancer.group.regex.wait.time} to time (can use time
* suffixes). This determines how long to wait between balancing. Since this balancer scans the
* metadata table, may want to set this higher for large tables.
* </ul>
*
* @since 2.1.0
*/
public class RegexGroupBalancer extends GroupBalancer {

  public static final String REGEX_PROPERTY =
      Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "balancer.group.regex.pattern";

  /**
   * Property naming the group used for the last tablet in the table and for tablets whose end row
   * does not match the configured regex.
   */
  public static final String DEFAULT_GROUP_PROPERTY =
      Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "balancer.group.regex.default";

  /**
   * @deprecated misspelled name retained for backwards compatibility; use
   *             {@link #DEFAULT_GROUP_PROPERTY} instead. Both constants have the same value.
   */
  @Deprecated
  public static final String DEFAUT_GROUP_PROPERTY = DEFAULT_GROUP_PROPERTY;

  public static final String WAIT_TIME_PROPERTY =
      Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "balancer.group.regex.wait.time";

  private final TableId tableId;

  public RegexGroupBalancer(TableId tableId) {
    super(tableId);
    this.tableId = tableId;
  }

  /**
   * @return the configured wait time between balancing passes, or the parent's default when
   *         {@value #WAIT_TIME_PROPERTY} is not set.
   */
  @Override
  protected long getWaitTime() {
    ServiceEnvironment.Configuration conf = environment.getConfiguration(tableId);
    if (conf.isSet(WAIT_TIME_PROPERTY)) {
      return ConfigurationTypeHelper.getTimeInMillis(conf.get(WAIT_TIME_PROPERTY));
    }

    return super.getWaitTime();
  }

  /**
   * Builds a partitioner that applies the configured regex to each tablet's end row and uses the
   * first capture group as the tablet's group name.
   */
  @Override
  protected Function<TabletId,String> getPartitioner() {

    ServiceEnvironment.Configuration conf = environment.getConfiguration(tableId);
    // NOTE(review): Pattern.compile throws NPE if the regex property is unset — confirm callers
    // guarantee the property is configured before this balancer is used.
    String regex = conf.get(REGEX_PROPERTY);
    final String defaultGroup = conf.get(DEFAULT_GROUP_PROPERTY);

    final Pattern pattern = Pattern.compile(regex);

    return input -> {
      Text er = input.getEndRow();
      if (er == null) {
        // The last tablet has no end row; it always goes to the default group.
        return defaultGroup;
      }

      Matcher matcher = pattern.matcher(er.toString());
      if (matcher.matches() && matcher.groupCount() == 1) {
        return matcher.group(1);
      }

      return defaultGroup;
    };
  }
}
| 9,808 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/GroupBalancer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.function.Function;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.util.ComparablePair;
import org.apache.accumulo.core.util.MapCounter;
import org.apache.accumulo.core.util.Pair;
import org.apache.commons.lang3.mutable.MutableInt;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Multimap;
/**
 * A balancer that evenly spreads groups of tablets across all tablet servers. This balancer
* accomplishes the following two goals :
*
* <ul>
* <li>Evenly spreads each group across all tservers.
* <li>Minimizes the total number of groups on each tserver.
* </ul>
*
* <p>
* To use this balancer you must extend it and implement {@link #getPartitioner()}. See
* {@link RegexGroupBalancer} as an example.
*
* @since 2.1.0
*/
public abstract class GroupBalancer implements TabletBalancer {
  protected BalancerEnvironment environment;
  // The single table this balancer instance is responsible for.
  private final TableId tableId;
  // Time (ms since epoch) of the last completed balance pass; used with getWaitTime() to throttle.
  private long lastRun = 0;
  @Override
  public void init(BalancerEnvironment balancerEnvironment) {
    this.environment = balancerEnvironment;
  }
  /**
   * @return A function that groups tablets into named groups.
   */
  protected abstract Function<TabletId,String> getPartitioner();
  public GroupBalancer(TableId tableId) {
    this.tableId = tableId;
  }
  /**
   * @return the current tablet-to-tserver locations for this balancer's table; a tablet with no
   *         location maps to {@code null}
   */
  protected Map<TabletId,TabletServerId> getLocationProvider() {
    return environment.listTabletLocations(tableId);
  }
  /**
   * The amount of time to wait between balancing.
   */
  protected long getWaitTime() {
    return 60000;
  }
  /**
   * The maximum number of migrations to perform in a single pass.
   */
  protected int getMaxMigrations() {
    return 1000;
  }
  /**
   * @return Examine current tserver and migrations and return true if balancing should occur.
   */
  protected boolean shouldBalance(SortedMap<TabletServerId,TServerStatus> current,
      Set<TabletId> migrations) {
    // need at least two tservers for any move to be possible
    if (current.size() < 2) {
      return false;
    }
    // defer balancing while any migration for this table is still outstanding
    return migrations.stream().noneMatch(t -> t.getTable().equals(tableId));
  }
  /**
   * Assigns unassigned tablets: keeps a tablet on its previous host when that host is still alive
   * (locality), otherwise round-robins tablets across tservers in group order so each group is
   * spread evenly.
   */
  @Override
  public void getAssignments(AssignmentParameters params) {
    if (params.currentStatus().isEmpty()) {
      return;
    }
    Function<TabletId,String> partitioner = getPartitioner();
    List<ComparablePair<String,TabletId>> tabletsByGroup = new ArrayList<>();
    for (Entry<TabletId,TabletServerId> entry : params.unassignedTablets().entrySet()) {
      TabletServerId last = entry.getValue();
      if (last != null) {
        // Maintain locality
        String fakeSessionID = " ";
        TabletServerId simple =
            new TabletServerIdImpl(last.getHost(), last.getPort(), fakeSessionID);
        Iterator<TabletServerId> find = params.currentStatus().tailMap(simple).keySet().iterator();
        if (find.hasNext()) {
          TabletServerId tserver = find.next();
          if (tserver.getHost().equals(last.getHost())) {
            params.addAssignment(entry.getKey(), tserver);
            continue;
          }
        }
      }
      tabletsByGroup.add(new ComparablePair<>(partitioner.apply(entry.getKey()), entry.getKey()));
    }
    // sort by group name so the round-robin below spreads each group across tservers
    Collections.sort(tabletsByGroup);
    Iterator<TabletServerId> tserverIter = Iterators.cycle(params.currentStatus().keySet());
    for (ComparablePair<String,TabletId> pair : tabletsByGroup) {
      TabletId tabletId = pair.getSecond();
      params.addAssignment(tabletId, tserverIter.next());
    }
  }
  @Override
  public long balance(BalanceParameters params) {
    // The terminology extra and expected are used in this code. Expected tablets is the number of
    // tablets a tserver must have for a given group and is
    // numInGroup/numTservers. Extra tablets are any tablets more than the number expected for a
    // given group. If numInGroup % numTservers > 0, then a tserver
    // may have one extra tablet for a group.
    //
    // Assume we have 4 tservers and group A has 11 tablets.
    // * expected tablets : group A is expected to have 2 tablets on each tservers
    // * extra tablets : group A may have an additional tablet on each tserver. Group A has a total
    // of 3 extra tablets.
    //
    // This balancer also evens out the extra tablets across all groups. The terminology
    // extraExpected and extraExtra is used to describe these tablets.
    // ExtraExpected is totalExtra/numTservers. ExtraExtra is totalExtra%numTservers. Each tserver
    // should have at least expectedExtra extra tablets and at most
    // one extraExtra tablets. All extra tablets on a tserver must be from different groups.
    //
    // Assume we have 6 tservers and three groups (G1, G2, G3) with 9 tablets each. Each tserver is
    // expected to have one tablet from each group and could
    // possibly have 2 tablets from a group. Below is an illustration of an ideal balancing of extra
    // tablets. To understand the illustration, the first column
    // shows tserver T1 with 2 tablets from G1, 1 tablet from G2, and two tablets from G3. EE means
    // empty, put it there so eclipse formatting would not mess up
    // table.
    //
    // T1 | T2 | T3 | T4 | T5 | T6
    // ---+----+----+----+----+-----
    // G3 | G2 | G3 | EE | EE | EE <-- extra extra tablets
    // G1 | G1 | G1 | G2 | G3 | G2 <-- extra expected tablets.
    // G1 | G1 | G1 | G1 | G1 | G1 <-- expected tablets for group 1
    // G2 | G2 | G2 | G2 | G2 | G2 <-- expected tablets for group 2
    // G3 | G3 | G3 | G3 | G3 | G3 <-- expected tablets for group 3
    //
    // Do not want to balance the extra tablets like the following. There are two problems with
    // this. First extra tablets are not evenly spread. Since there are
    // a total of 9 extra tablets, every tserver is expected to have at least one extra tablet.
    // Second tserver T1 has two extra tablet for group G1. This
    // violates the principle that a tserver can only have one extra tablet for a given group.
    //
    // T1 | T2 | T3 | T4 | T5 | T6
    // ---+----+----+----+----+-----
    // G1 | EE | EE | EE | EE | EE <--- one extra tablets from group 1
    // G3 | G3 | G3 | EE | EE | EE <--- three extra tablets from group 3
    // G2 | G2 | G2 | EE | EE | EE <--- three extra tablets from group 2
    // G1 | G1 | EE | EE | EE | EE <--- two extra tablets from group 1
    // G1 | G1 | G1 | G1 | G1 | G1 <-- expected tablets for group 1
    // G2 | G2 | G2 | G2 | G2 | G2 <-- expected tablets for group 2
    // G3 | G3 | G3 | G3 | G3 | G3 <-- expected tablets for group 3
    if (!shouldBalance(params.currentStatus(), params.currentMigrations())) {
      return 5000;
    }
    // throttle: skip this pass if the configured wait time has not elapsed since the last run
    if (System.currentTimeMillis() - lastRun < getWaitTime()) {
      return 5000;
    }
    MapCounter<String> groupCounts = new MapCounter<>();
    Map<TabletServerId,TserverGroupInfo> tservers = new HashMap<>();
    for (TabletServerId tsi : params.currentStatus().keySet()) {
      tservers.put(tsi, new TserverGroupInfo(tsi));
    }
    Function<TabletId,String> partitioner = getPartitioner();
    // collect stats about current state
    for (var tablet : getLocationProvider().entrySet()) {
      String group = partitioner.apply(tablet.getKey());
      var loc = tablet.getValue();
      if (loc == null || !tservers.containsKey(loc)) {
        // a tablet is unassigned or on an unknown tserver; state is unstable, retry later
        return 5000;
      }
      groupCounts.increment(group, 1);
      TserverGroupInfo tgi = tservers.get(loc);
      tgi.addGroup(group);
    }
    Map<String,Integer> expectedCounts = new HashMap<>();
    int totalExtra = 0;
    for (String group : groupCounts.keySet()) {
      int groupCount = groupCounts.getInt(group);
      totalExtra += groupCount % params.currentStatus().size();
      expectedCounts.put(group, (groupCount / params.currentStatus().size()));
    }
    // The number of extra tablets from all groups that each tserver must have.
    int expectedExtra = totalExtra / params.currentStatus().size();
    int maxExtraGroups = expectedExtra + 1;
    expectedCounts = Collections.unmodifiableMap(expectedCounts);
    tservers = Collections.unmodifiableMap(tservers);
    for (TserverGroupInfo tgi : tservers.values()) {
      tgi.finishedAdding(expectedCounts);
    }
    Moves moves = new Moves();
    // The order of the following steps is important, because as ordered each step should not move
    // any tablets moved by a previous step.
    balanceExpected(tservers, moves);
    if (moves.size() < getMaxMigrations()) {
      balanceExtraExpected(tservers, expectedExtra, moves);
      if (moves.size() < getMaxMigrations()) {
        boolean cont = balanceExtraMultiple(tservers, maxExtraGroups, moves);
        if (cont && moves.size() < getMaxMigrations()) {
          balanceExtraExtra(tservers, maxExtraGroups, moves);
        }
      }
    }
    populateMigrations(tservers.keySet(), params.migrationsOut(), moves);
    lastRun = System.currentTimeMillis();
    return 5000;
  }
  /**
   * Per-tserver bookkeeping of tablet counts by group. Counts are accumulated via
   * {@link #addGroup(String)}, then frozen by {@link #finishedAdding(Map)} into per-group
   * deficits (fewer than expected) and extras (more than expected), which the move methods update
   * as migrations are planned.
   */
  static class TserverGroupInfo {
    // expected tablet count per group; shared, immutable view set in finishedAdding()
    private Map<String,Integer> expectedCounts;
    // current tablet count per group on this tserver
    private final Map<String,MutableInt> initialCounts = new HashMap<>();
    // groups where this tserver holds more than expected (group -> surplus)
    private final Map<String,Integer> extraCounts = new HashMap<>();
    // groups where this tserver holds fewer than expected (group -> shortfall)
    private final Map<String,Integer> expectedDeficits = new HashMap<>();
    private final TabletServerId tsi;
    // two-phase lifecycle guard: addGroup() only before finishedAdding(), moves only after
    private boolean finishedAdding = false;
    TserverGroupInfo(TabletServerId tsi) {
      this.tsi = tsi;
    }
    // Record one more tablet of the given group on this tserver. Only legal before
    // finishedAdding() has been called.
    public void addGroup(String group) {
      checkState(!finishedAdding);
      MutableInt mi = initialCounts.get(group);
      if (mi == null) {
        mi = new MutableInt();
        initialCounts.put(group, mi);
      }
      mi.increment();
    }
    // Freeze the counts and derive per-group deficits and extras relative to expectedCounts.
    public void finishedAdding(Map<String,Integer> expectedCounts) {
      checkState(!finishedAdding);
      finishedAdding = true;
      this.expectedCounts = expectedCounts;
      for (Entry<String,Integer> entry : expectedCounts.entrySet()) {
        String group = entry.getKey();
        int expected = entry.getValue();
        MutableInt count = initialCounts.get(group);
        int num = count == null ? 0 : count.intValue();
        if (num < expected) {
          expectedDeficits.put(group, expected - num);
        } else if (num > expected) {
          extraCounts.put(group, num - expected);
        }
      }
    }
    // Account for num tablets of the given group being moved off this tserver. Only extra
    // (surplus) tablets may be moved off.
    public void moveOff(String group, int num) {
      checkArgument(num > 0);
      checkState(finishedAdding);
      Integer extraCount = extraCounts.get(group);
      // don't wrap precondition check due to https://github.com/spotbugs/spotbugs/issues/462
      String formatString = "group=%s num=%s extraCount=%s";
      checkArgument(extraCount != null && extraCount >= num, formatString, group, num, extraCount);
      MutableInt initialCount = initialCounts.get(group);
      checkArgument(initialCount.intValue() >= num);
      initialCount.subtract(num);
      if (extraCount - num == 0) {
        extraCounts.remove(group);
      } else {
        extraCounts.put(group, extraCount - num);
      }
    }
    // Account for num tablets of the given group being moved onto this tserver. Incoming tablets
    // first fill any deficit for the group; any remainder becomes extra.
    public void moveTo(String group, int num) {
      checkArgument(num > 0);
      checkArgument(expectedCounts.containsKey(group));
      checkState(finishedAdding);
      Integer deficit = expectedDeficits.get(group);
      if (deficit != null) {
        if (num >= deficit) {
          expectedDeficits.remove(group);
          num -= deficit;
        } else {
          expectedDeficits.put(group, deficit - num);
          num = 0;
        }
      }
      if (num > 0) {
        Integer extra = extraCounts.get(group);
        if (extra == null) {
          extra = 0;
        }
        extraCounts.put(group, extra + num);
      }
      // TODO could check extra constraints
    }
    public Map<String,Integer> getExpectedDeficits() {
      checkState(finishedAdding);
      return Collections.unmodifiableMap(expectedDeficits);
    }
    public Map<String,Integer> getExtras() {
      checkState(finishedAdding);
      return Collections.unmodifiableMap(extraCounts);
    }
    public TabletServerId getTabletServerId() {
      return tsi;
    }
    @Override
    public int hashCode() {
      // identity is the tserver id only; the mutable counts are intentionally excluded
      return tsi.hashCode();
    }
    @Override
    public boolean equals(Object o) {
      if (o instanceof TserverGroupInfo) {
        TserverGroupInfo otgi = (TserverGroupInfo) o;
        return tsi.equals(otgi.tsi);
      }
      return false;
    }
    @Override
    public String toString() {
      return tsi.toString();
    }
  }
  // A planned migration of count tablets (of some group, tracked by the enclosing table key in
  // Moves) to a destination tserver.
  private static class Move {
    TserverGroupInfo dest;
    int count;
    public Move(TserverGroupInfo dest, int num) {
      this.dest = dest;
      this.count = num;
    }
  }
  /**
   * Accumulates planned migrations indexed by (source tserver, group), keeping a running total so
   * callers can stop once the migration cap is reached.
   */
  private static class Moves {
    private final HashBasedTable<TabletServerId,String,List<Move>> moves = HashBasedTable.create();
    private int totalMoves = 0;
    // Plan moving num tablets of group from src to dest, updating both tservers' bookkeeping.
    public void move(String group, int num, TserverGroupInfo src, TserverGroupInfo dest) {
      checkArgument(num > 0);
      checkArgument(!src.equals(dest));
      src.moveOff(group, num);
      dest.moveTo(group, num);
      List<Move> srcMoves = moves.get(src.getTabletServerId(), group);
      if (srcMoves == null) {
        srcMoves = new ArrayList<>();
        moves.put(src.getTabletServerId(), group, srcMoves);
      }
      srcMoves.add(new Move(dest, num));
      totalMoves += num;
    }
    // Consume one planned move for the given (src, group), returning its destination, or null if
    // no move is planned for that pair.
    public TabletServerId removeMove(TabletServerId src, String group) {
      List<Move> srcMoves = moves.get(src, group);
      if (srcMoves == null) {
        return null;
      }
      Move move = srcMoves.get(srcMoves.size() - 1);
      TabletServerId ret = move.dest.getTabletServerId();
      totalMoves--;
      move.count--;
      if (move.count == 0) {
        srcMoves.remove(srcMoves.size() - 1);
        if (srcMoves.isEmpty()) {
          moves.remove(src, group);
        }
      }
      return ret;
    }
    public int size() {
      return totalMoves;
    }
  }
  // Step 4 of balance(): tservers with more than maxExtraGroups distinct extra groups shed one
  // tablet per group to tservers below that limit, for groups the destination lacks an extra of.
  private void balanceExtraExtra(Map<TabletServerId,TserverGroupInfo> tservers, int maxExtraGroups,
      Moves moves) {
    HashBasedTable<String,TabletServerId,TserverGroupInfo> surplusExtra = HashBasedTable.create();
    for (TserverGroupInfo tgi : tservers.values()) {
      Map<String,Integer> extras = tgi.getExtras();
      if (extras.size() > maxExtraGroups) {
        for (String group : extras.keySet()) {
          surplusExtra.put(group, tgi.getTabletServerId(), tgi);
        }
      }
    }
    ArrayList<Pair<String,TabletServerId>> serversGroupsToRemove = new ArrayList<>();
    ArrayList<TabletServerId> serversToRemove = new ArrayList<>();
    for (TserverGroupInfo destTgi : tservers.values()) {
      if (surplusExtra.isEmpty()) {
        break;
      }
      Map<String,Integer> extras = destTgi.getExtras();
      if (extras.size() < maxExtraGroups) {
        serversToRemove.clear();
        serversGroupsToRemove.clear();
        for (String group : surplusExtra.rowKeySet()) {
          // only give the destination a group it has no extra of yet
          if (!extras.containsKey(group)) {
            TserverGroupInfo srcTgi = surplusExtra.row(group).values().iterator().next();
            moves.move(group, 1, srcTgi, destTgi);
            if (srcTgi.getExtras().size() <= maxExtraGroups) {
              // source is no longer over the limit; drop it from all candidate rows
              serversToRemove.add(srcTgi.getTabletServerId());
            } else {
              // source is still over the limit; drop just this (group, server) candidate cell
              serversGroupsToRemove.add(new Pair<>(group, srcTgi.getTabletServerId()));
            }
            if (destTgi.getExtras().size() >= maxExtraGroups
                || moves.size() >= getMaxMigrations()) {
              break;
            }
          }
        }
        if (!serversToRemove.isEmpty()) {
          surplusExtra.columnKeySet().removeAll(serversToRemove);
        }
        for (Pair<String,TabletServerId> pair : serversGroupsToRemove) {
          surplusExtra.remove(pair.getFirst(), pair.getSecond());
        }
        if (moves.size() >= getMaxMigrations()) {
          break;
        }
      }
    }
  }
  // Step 3 of balance(): break up cases where one tserver holds multiple extra tablets of the
  // same group. Returns false when a forced pass had to place extras on tservers already at
  // maxExtraGroups, which tells the caller to skip the extra-extra step this round.
  private boolean balanceExtraMultiple(Map<TabletServerId,TserverGroupInfo> tservers,
      int maxExtraGroups, Moves moves) {
    Multimap<String,TserverGroupInfo> extraMultiple = HashMultimap.create();
    for (TserverGroupInfo tgi : tservers.values()) {
      Map<String,Integer> extras = tgi.getExtras();
      for (Entry<String,Integer> entry : extras.entrySet()) {
        if (entry.getValue() > 1) {
          extraMultiple.put(entry.getKey(), tgi);
        }
      }
    }
    balanceExtraMultiple(tservers, maxExtraGroups, moves, extraMultiple, false);
    if (moves.size() < getMaxMigrations() && !extraMultiple.isEmpty()) {
      // no place to move so must exceed maxExtra temporarily... subsequent balancer calls will
      // smooth things out
      balanceExtraMultiple(tservers, maxExtraGroups, moves, extraMultiple, true);
      return false;
    } else {
      return true;
    }
  }
  // Worker for balanceExtraMultiple: move one tablet of each multiply-extra group to a tserver
  // lacking an extra for that group. When alwaysAdd is true, destinations at or over
  // maxExtraGroups are used anyway (forced placement).
  private void balanceExtraMultiple(Map<TabletServerId,TserverGroupInfo> tservers,
      int maxExtraGroups, Moves moves, Multimap<String,TserverGroupInfo> extraMultiple,
      boolean alwaysAdd) {
    ArrayList<Pair<String,TserverGroupInfo>> serversToRemove = new ArrayList<>();
    for (TserverGroupInfo destTgi : tservers.values()) {
      Map<String,Integer> extras = destTgi.getExtras();
      if (alwaysAdd || extras.size() < maxExtraGroups) {
        serversToRemove.clear();
        for (String group : extraMultiple.keySet()) {
          if (!extras.containsKey(group)) {
            Collection<TserverGroupInfo> sources = extraMultiple.get(group);
            Iterator<TserverGroupInfo> iter = sources.iterator();
            TserverGroupInfo srcTgi = iter.next();
            int num = srcTgi.getExtras().get(group);
            moves.move(group, 1, srcTgi, destTgi);
            if (num == 2) {
              // source drops to a single extra for this group; no longer "multiple"
              serversToRemove.add(new Pair<>(group, srcTgi));
            }
            if (destTgi.getExtras().size() >= maxExtraGroups
                || moves.size() >= getMaxMigrations()) {
              break;
            }
          }
        }
        for (Pair<String,TserverGroupInfo> pair : serversToRemove) {
          extraMultiple.remove(pair.getFirst(), pair.getSecond());
        }
        if (extraMultiple.isEmpty() || moves.size() >= getMaxMigrations()) {
          break;
        }
      }
    }
  }
  // Step 2 of balance(): move extras from tservers holding more than expectedExtra distinct extra
  // groups to tservers holding fewer, so every tserver reaches its expected share of extras.
  private void balanceExtraExpected(Map<TabletServerId,TserverGroupInfo> tservers,
      int expectedExtra, Moves moves) {
    HashBasedTable<String,TabletServerId,TserverGroupInfo> extraSurplus = HashBasedTable.create();
    for (TserverGroupInfo tgi : tservers.values()) {
      Map<String,Integer> extras = tgi.getExtras();
      if (extras.size() > expectedExtra) {
        for (String group : extras.keySet()) {
          extraSurplus.put(group, tgi.getTabletServerId(), tgi);
        }
      }
    }
    ArrayList<TabletServerId> emptyServers = new ArrayList<>();
    ArrayList<Pair<String,TabletServerId>> emptyServerGroups = new ArrayList<>();
    for (TserverGroupInfo destTgi : tservers.values()) {
      if (extraSurplus.isEmpty()) {
        break;
      }
      Map<String,Integer> extras = destTgi.getExtras();
      if (extras.size() < expectedExtra) {
        emptyServers.clear();
        emptyServerGroups.clear();
        nextGroup: for (String group : extraSurplus.rowKeySet()) {
          if (!extras.containsKey(group)) {
            Iterator<TserverGroupInfo> iter = extraSurplus.row(group).values().iterator();
            TserverGroupInfo srcTgi = iter.next();
            // skip sources already at or below the expected extra count
            while (srcTgi.getExtras().size() <= expectedExtra) {
              if (iter.hasNext()) {
                srcTgi = iter.next();
              } else {
                continue nextGroup;
              }
            }
            moves.move(group, 1, srcTgi, destTgi);
            if (srcTgi.getExtras().size() <= expectedExtra) {
              emptyServers.add(srcTgi.getTabletServerId());
            } else if (srcTgi.getExtras().get(group) == null) {
              emptyServerGroups.add(new Pair<>(group, srcTgi.getTabletServerId()));
            }
            if (destTgi.getExtras().size() >= expectedExtra || moves.size() >= getMaxMigrations()) {
              break;
            }
          }
        }
        if (!emptyServers.isEmpty()) {
          extraSurplus.columnKeySet().removeAll(emptyServers);
        }
        for (Pair<String,TabletServerId> pair : emptyServerGroups) {
          extraSurplus.remove(pair.getFirst(), pair.getSecond());
        }
        if (moves.size() >= getMaxMigrations()) {
          break;
        }
      }
    }
  }
  // Step 1 of balance(): for each group, move surplus tablets from tservers above the expected
  // count to tservers below it until every deficit is filled or the migration cap is hit.
  private void balanceExpected(Map<TabletServerId,TserverGroupInfo> tservers, Moves moves) {
    Multimap<String,TserverGroupInfo> groupDefecits = HashMultimap.create();
    Multimap<String,TserverGroupInfo> groupSurplus = HashMultimap.create();
    for (TserverGroupInfo tgi : tservers.values()) {
      for (String group : tgi.getExpectedDeficits().keySet()) {
        groupDefecits.put(group, tgi);
      }
      for (String group : tgi.getExtras().keySet()) {
        groupSurplus.put(group, tgi);
      }
    }
    for (String group : groupDefecits.keySet()) {
      Collection<TserverGroupInfo> defecitServers = groupDefecits.get(group);
      for (TserverGroupInfo defecitTsi : defecitServers) {
        int numToMove = defecitTsi.getExpectedDeficits().get(group);
        Iterator<TserverGroupInfo> surplusIter = groupSurplus.get(group).iterator();
        while (numToMove > 0) {
          TserverGroupInfo surplusTsi = surplusIter.next();
          int available = surplusTsi.getExtras().get(group);
          if (numToMove >= available) {
            // this surplus server will be fully drained for the group; drop it from candidates
            surplusIter.remove();
          }
          int transfer = Math.min(numToMove, available);
          numToMove -= transfer;
          moves.move(group, transfer, surplusTsi, defecitTsi);
          if (moves.size() >= getMaxMigrations()) {
            return;
          }
        }
      }
    }
  }
  /**
   * Translates the planned moves into concrete {@link TabletMigration}s by scanning current
   * tablet locations. If any tablet has no location or sits on a tserver not in {@code current},
   * all migrations are discarded (the state is stale) and balancing is retried on a later pass.
   */
  private void populateMigrations(Set<TabletServerId> current, List<TabletMigration> migrationsOut,
      Moves moves) {
    if (moves.size() == 0) {
      return;
    }
    Function<TabletId,String> partitioner = getPartitioner();
    for (var tablet : getLocationProvider().entrySet()) {
      String group = partitioner.apply(tablet.getKey());
      var loc = tablet.getValue();
      if (loc == null || !current.contains(loc)) {
        migrationsOut.clear();
        return;
      }
      TabletServerId dest = moves.removeMove(loc, group);
      if (dest != null) {
        migrationsOut.add(new TabletMigration(tablet.getKey(), loc, dest));
        if (moves.size() == 0) {
          break;
        }
      }
    }
  }
}
| 9,809 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/SimpleLoadBalancer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedMap;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TableStatistics;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.spi.balancer.util.ThrottledBalancerProblemReporter;
import org.apache.accumulo.core.spi.balancer.util.ThrottledBalancerProblemReporter.OutstandingMigrationsProblem;
import org.apache.accumulo.core.spi.balancer.util.ThrottledBalancerProblemReporter.Problem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A simple tablet balancer that attempts to spread tablets out evenly across all available tablet
* servers. The goal is to achieve the same number of tablets on each tablet server.
*
* <p>
* Initial assignments attempt to maintain locality by assigning tablets to their previous location
* if possible, but otherwise assignments are made in a random fashion across all available tablet
* servers.
*
* @since 2.1.0
*/
public class SimpleLoadBalancer implements TabletBalancer {
  private static final Logger log = LoggerFactory.getLogger(SimpleLoadBalancer.class);
  protected BalancerEnvironment environment;
  // Round-robin iterator over a shuffled snapshot of tservers, used for non-local assignments;
  // replaced when exhausted or when it yields a server no longer in the current set.
  Iterator<TabletServerId> assignments;
  // if tableToBalance is set, then only balance the given table
  TableId tableToBalance = null;
  public SimpleLoadBalancer() {}
  public SimpleLoadBalancer(TableId table) {
    tableToBalance = table;
  }
  @Override
  public void init(BalancerEnvironment balancerEnvironment) {
    this.environment = balancerEnvironment;
  }
  // Return the given tservers as a list in random order, so assignments are not biased
  // toward any particular server ordering.
  List<TabletServerId> randomize(Set<TabletServerId> locations) {
    List<TabletServerId> result = new ArrayList<>(locations);
    Collections.shuffle(result);
    return result;
  }
  /**
   * Picks a tserver for a tablet: prefers the tablet's previous host if it is still present in
   * {@code locations} (maintains locality), otherwise hands out servers round-robin from a
   * shuffled list.
   *
   * @param locations current live tservers
   * @param last the tablet's previous location, or null if none
   * @return the chosen tserver, or null if no tservers are available
   */
  public TabletServerId getAssignment(SortedMap<TabletServerId,TServerStatus> locations,
      TabletServerId last) {
    if (locations.isEmpty()) {
      return null;
    }
    if (last != null) {
      // Maintain locality
      String fakeSessionID = " ";
      TabletServerId simple = new TabletServerIdImpl(last.getHost(), last.getPort(), fakeSessionID);
      Iterator<TabletServerId> find = locations.tailMap(simple).keySet().iterator();
      if (find.hasNext()) {
        TabletServerId current = find.next();
        if (current.getHost().equals(last.getHost())) {
          return current;
        }
      }
    }
    // The strategy here is to walk through the locations and hand them back, one at a time
    // Grab an iterator off of the set of options; use a new iterator if it hands back something not
    // in the current list.
    if (assignments == null || !assignments.hasNext()) {
      assignments = randomize(locations.keySet()).iterator();
    }
    TabletServerId result = assignments.next();
    if (!locations.containsKey(result)) {
      // the iterator is stale (that server is gone); discard it and pick from a fresh shuffle
      assignments = null;
      return randomize(locations.keySet()).iterator().next();
    }
    return result;
  }
  // Pairs a tserver with its (mutable) online tablet count; ordered by count, then server id,
  // so compareTo is consistent with equals.
  static class ServerCounts implements Comparable<ServerCounts> {
    public final TabletServerId server;
    public int count;
    public final TServerStatus status;
    ServerCounts(int count, TabletServerId server, TServerStatus status) {
      this.count = count;
      this.server = server;
      this.status = status;
    }
    @Override
    public int hashCode() {
      return Objects.hashCode(server) + count;
    }
    @Override
    public boolean equals(Object obj) {
      return obj == this || (obj instanceof ServerCounts && compareTo((ServerCounts) obj) == 0);
    }
    @Override
    public int compareTo(ServerCounts obj) {
      int result = count - obj.count;
      if (result == 0) {
        return server.compareTo(obj.server);
      }
      return result;
    }
  }
  /**
   * Nominates migrations that move tablets from the most loaded tservers toward the least loaded.
   *
   * @param current status of each live tserver
   * @param result output list that proposed migrations are appended to
   * @return true if another balancing pass is needed to finish evening out the load
   */
  public boolean getMigrations(Map<TabletServerId,TServerStatus> current,
      List<TabletMigration> result) {
    boolean moreBalancingNeeded = false;
    try {
      // no moves possible
      if (current.size() < 2) {
        return false;
      }
      final Map<TableId,Map<TabletId,TabletStatistics>> donerTabletStats = new HashMap<>();
      // Sort by total number of online tablets, per server
      int total = 0;
      ArrayList<ServerCounts> totals = new ArrayList<>();
      for (Entry<TabletServerId,TServerStatus> entry : current.entrySet()) {
        int serverTotal = 0;
        if (entry.getValue() != null && entry.getValue().getTableMap() != null) {
          for (Entry<String,TableStatistics> e : entry.getValue().getTableMap().entrySet()) {
            /*
             * The check below was on entry.getKey(), but that resolves to a tabletserver not a
             * tablename. Believe it should be e.getKey() which is a tablename
             */
            if (tableToBalance == null || tableToBalance.canonical().equals(e.getKey())) {
              serverTotal += e.getValue().getOnlineTabletCount();
            }
          }
        }
        totals.add(new ServerCounts(serverTotal, entry.getKey(), entry.getValue()));
        total += serverTotal;
      }
      // order from most loaded (index 0) to least loaded (last index)
      totals.sort(Collections.reverseOrder());
      int even = total / totals.size();
      int numServersOverEven = total % totals.size();
      // Move tablets from the servers with too many to the servers with
      // the fewest but only nominate tablets to move once. This allows us
      // to fill new servers with tablets from a mostly balanced server
      // very quickly. However, it may take several balancing passes to move
      // tablets from one hugely overloaded server to many slightly
      // under-loaded servers.
      int end = totals.size() - 1;
      int movedAlready = 0;
      int tooManyIndex = 0;
      while (tooManyIndex < end) {
        ServerCounts tooMany = totals.get(tooManyIndex);
        int goal = even;
        // the first (total % size) servers get one tablet above the even split
        if (tooManyIndex < numServersOverEven) {
          goal++;
        }
        int needToUnload = tooMany.count - goal;
        ServerCounts tooLittle = totals.get(end);
        int needToLoad = goal - tooLittle.count - movedAlready;
        if (needToUnload < 1 && needToLoad < 1) {
          break;
        }
        if (needToUnload >= needToLoad) {
          // this donor can fill the receiver completely; advance to the next receiver
          result.addAll(move(tooMany, tooLittle, needToLoad, donerTabletStats));
          end--;
          movedAlready = 0;
        } else {
          // donor exhausted before the receiver is full; track how much was already given
          result.addAll(move(tooMany, tooLittle, needToUnload, donerTabletStats));
          movedAlready += needToUnload;
        }
        if (needToUnload > needToLoad) {
          moreBalancingNeeded = true;
        } else {
          tooManyIndex++;
          donerTabletStats.clear();
        }
      }
    } finally {
      log.trace("balance ended with {} migrations", result.size());
    }
    return moreBalancingNeeded;
  }
  /**
   * Select a tablet based on differences between table loads; if the loads are even, use the
   * busiest table
   */
  List<TabletMigration> move(ServerCounts tooMuch, ServerCounts tooLittle, int count,
      Map<TableId,Map<TabletId,TabletStatistics>> donerTabletStats) {
    if (count == 0) {
      return Collections.emptyList();
    }
    List<TabletMigration> result = new ArrayList<>();
    // Copy counts so we can update them as we propose migrations
    Map<TableId,Integer> tooMuchMap = tabletCountsPerTable(tooMuch.status);
    Map<TableId,Integer> tooLittleMap = tabletCountsPerTable(tooLittle.status);
    for (int i = 0; i < count; i++) {
      TableId table = getTableToMigrate(tooMuch, tooMuchMap, tooLittleMap);
      Map<TabletId,TabletStatistics> onlineTabletsForTable = donerTabletStats.get(table);
      try {
        if (onlineTabletsForTable == null) {
          // lazily fetch and cache the donor's tablet stats for this table
          onlineTabletsForTable = new HashMap<>();
          List<TabletStatistics> stats = getOnlineTabletsForTable(tooMuch.server, table);
          if (stats == null) {
            log.warn("Unable to find tablets to move");
            return result;
          }
          for (TabletStatistics stat : stats) {
            onlineTabletsForTable.put(stat.getTabletId(), stat);
          }
          donerTabletStats.put(table, onlineTabletsForTable);
        }
      } catch (Exception ex) {
        log.error("Unable to select a tablet to move", ex);
        return result;
      }
      TabletId tabletId = selectTablet(onlineTabletsForTable);
      onlineTabletsForTable.remove(tabletId);
      if (tabletId == null) {
        return result;
      }
      tooMuchMap.put(table, tooMuchMap.get(table) - 1);
      /*
       * If a table grows from 1 tablet then tooLittleMap.get(table) can return a null, since there
       * is only one tabletserver that holds all of the tablets. Here we check to see if in fact
       * that is the case and if so set the value to 0.
       */
      Integer tooLittleCount = tooLittleMap.getOrDefault(table, 0);
      tooLittleMap.put(table, tooLittleCount + 1);
      tooMuch.count--;
      tooLittle.count++;
      result.add(new TabletMigration(tabletId, tooMuch.server, tooLittle.server));
    }
    return result;
  }
  // Choose which table to take a tablet from: the configured table if one is set; otherwise the
  // table with the largest donor-vs-receiver count difference, falling back to the busiest table
  // (by ingest + query rate) when no difference is 2 or more.
  private TableId getTableToMigrate(ServerCounts tooMuch, Map<TableId,Integer> tooMuchMap,
      Map<TableId,Integer> tooLittleMap) {
    if (tableToBalance != null) {
      return tableToBalance;
    }
    // find a table to migrate
    // look for an uneven table count
    Entry<TableId,Integer> biggestEntry = tooMuchMap.entrySet().stream().map(entry -> {
      TableId tableID = entry.getKey();
      int diff = entry.getValue() - tooLittleMap.getOrDefault(tableID, 0);
      return new SimpleEntry<>(tableID, diff); // map the table count to the difference
    }).max(Entry.comparingByValue()) // get the largest difference
        .orElseGet(() -> new SimpleEntry<>(null, 0));
    if (biggestEntry.getValue() < 2) {
      return busiest(tooMuch.status.getTableMap());
    } else {
      return biggestEntry.getKey();
    }
  }
  // Fetch per-tablet statistics for one table on one tserver; overridable for testing.
  protected List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tabletServerId,
      TableId tableId) throws AccumuloSecurityException, AccumuloException {
    return environment.listOnlineTabletsForTable(tabletServerId, tableId);
  }
  // Build a mutable map of online tablet counts per table from a tserver's status; empty if the
  // status or its table map is null.
  static Map<TableId,Integer> tabletCountsPerTable(TServerStatus status) {
    Map<TableId,Integer> result = new HashMap<>();
    if (status != null && status.getTableMap() != null) {
      Map<String,TableStatistics> tableMap = status.getTableMap();
      for (Entry<String,TableStatistics> entry : tableMap.entrySet()) {
        result.put(TableId.of(entry.getKey()), entry.getValue().getOnlineTabletCount());
      }
    }
    return result;
  }
  // Choose the tablet with the most recent split creation time, or null if there are none.
  static TabletId selectTablet(Map<TabletId,TabletStatistics> extents) {
    if (extents.isEmpty()) {
      return null;
    }
    TabletId mostRecentlySplit = null;
    long splitTime = 0;
    for (Entry<TabletId,TabletStatistics> entry : extents.entrySet()) {
      if (entry.getValue().getSplitCreationTime() >= splitTime) {
        splitTime = entry.getValue().getSplitCreationTime();
        mostRecentlySplit = entry.getKey();
      }
    }
    return mostRecentlySplit;
  }
  // define what it means for a tablet to be busy
  private static TableId busiest(Map<String,TableStatistics> tables) {
    TableId result = null;
    double busiest = Double.NEGATIVE_INFINITY;
    for (Entry<String,TableStatistics> entry : tables.entrySet()) {
      TableStatistics info = entry.getValue();
      double busy = info.getIngestRate() + info.getQueryRate();
      if (busy > busiest) {
        busiest = busy;
        result = TableId.of(entry.getKey());
      }
    }
    return result;
  }
  @Override
  public void getAssignments(AssignmentParameters params) {
    params.unassignedTablets().forEach((tabletId, tserverId) -> params.addAssignment(tabletId,
        getAssignment(params.currentStatus(), tserverId)));
  }
  // Throttled problem reporting so repeated balance failures do not flood the logs.
  private final ThrottledBalancerProblemReporter problemReporter =
      new ThrottledBalancerProblemReporter(getClass());
  private final Problem noTserversProblem = problemReporter.createNoTabletServersProblem();
  private final OutstandingMigrationsProblem outstandingMigrationsProblem =
      problemReporter.createOutstandingMigrationsProblem();
  /**
   * Balances only when tservers exist and no migrations are outstanding. Returns the suggested
   * wait (ms) before the next call: 1s when more balancing is needed, otherwise 5s.
   */
  @Override
  public long balance(BalanceParameters params) {
    // do we have any servers?
    if (params.currentStatus().isEmpty()) {
      problemReporter.reportProblem(noTserversProblem);
    } else {
      // Don't migrate if we have migrations in progress
      if (params.currentMigrations().isEmpty()) {
        problemReporter.clearProblemReportTimes();
        if (getMigrations(params.currentStatus(), params.migrationsOut())) {
          return SECONDS.toMillis(1);
        }
      } else {
        outstandingMigrationsProblem.setMigrations(params.currentMigrations());
        problemReporter.reportProblem(outstandingMigrationsProblem);
      }
    }
    return 5_000;
  }
}
| 9,810 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/BalancerEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
/**
 * This interface is an extension of {@link ServiceEnvironment} that exposes system level
 * information that is specific to tablet balancing.
 *
 * @since 2.1.0
 */
public interface BalancerEnvironment extends ServiceEnvironment {
  /**
   * Many Accumulo plugins are given table IDs as this is what Accumulo uses internally to identify
   * tables. This provides a mapping of table names to table IDs for the purposes of translating
   * and/or enumerating the existing tables.
   *
   * @return a mapping of table name to table id for all known tables
   */
  Map<String,TableId> getTableIdMap();
  /**
   * Accumulo plugins working with a table may need to know if the table is online or not before
   * operating on it.
   *
   * @param tableId The id of the table to check.
   * @return {@code true} if the table is online and {@code false} if not
   */
  boolean isTableOnline(TableId tableId);
  /**
   * Fetch the locations for each of {@code tableId}'s tablets from the metadata table. If there is
   * no location available for a given tablet, then the returned mapping will have a {@code null}
   * value stored for the tablet id.
   *
   * @param tableId The id of the table for which to retrieve tablets.
   * @return a mapping of {@link TabletId} to {@link TabletServerId} (or {@code null} if no
   *         location is available) for each tablet belonging to {@code tableId}
   */
  Map<TabletId,TabletServerId> listTabletLocations(TableId tableId);
  /**
   * Fetch the tablets for the given table by asking the tablet server. Useful if your balance
   * strategy needs details at the tablet level to decide what tablets to move.
   *
   * @param tabletServerId The tablet server to ask.
   * @param tableId The table id
   * @return a list of tablet statistics
   * @throws AccumuloSecurityException tablet server disapproves of your internal System password.
   * @throws AccumuloException any other problem
   */
  List<TabletStatistics> listOnlineTabletsForTable(TabletServerId tabletServerId, TableId tableId)
      throws AccumuloException, AccumuloSecurityException;
  /**
   * Retrieve the classloader context that is configured for {@code tableId}, or {@code null} if
   * none is configured.
   *
   * @return the configured context name, or {@code null} when the table has none
   */
  String tableContext(TableId tableId);
}
| 9,811 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/HostRegexTableLoadBalancer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static java.util.concurrent.TimeUnit.HOURS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.PluginEnvironment;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.manager.balancer.AssignmentParamsImpl;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.manager.balancer.TServerStatusImpl;
import org.apache.accumulo.core.manager.balancer.TableStatisticsImpl;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TableStatistics;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
/**
 * This balancer creates groups of tablet servers using user-provided regular expressions over the
 * tablet server hostnames. Then it delegates to the table balancer to balance the tablets within
 * the resulting group of tablet servers. All tablet servers that do not match a regex are grouped
 * into a default group.<br>
 * Regex properties for this balancer are specified as:<br>
 * <b>table.custom.balancer.host.regex.<tablename>=<regex></b><br>
 * Periodically (default 5m) this balancer will check to see if a tablet server is hosting tablets
 * that it should not be according to the regex configuration. If this occurs then the offending
 * tablets will be reassigned. This would cover the case where the configuration is changed and the
 * manager is restarted while the tablet servers are up. To change the out of bounds check time
 * period, set the following property:<br>
 * <b>table.custom.balancer.host.regex.oob.period=5m</b><br>
 * Regex matching can be based on either the host name (default) or host ip address. To set this
 * balancer to match the regular expressions to the tablet server IP address, then set the following
 * property:<br>
 * <b>table.custom.balancer.host.regex.is.ip=true</b><br>
 * It's possible that this balancer may create a lot of migrations. To limit the number of
 * migrations that are created during a balance call, set the following property (default 250):<br>
 * <b>table.custom.balancer.host.regex.concurrent.migrations</b> This balancer can continue
 * balancing even if there are outstanding migrations. To limit the number of outstanding migrations
 * in which this balancer will continue balancing, set the following property (default 0):<br>
 * <b>table.custom.balancer.host.regex.max.outstanding.migrations</b>
 *
 * @since 2.1.0
 */
public class HostRegexTableLoadBalancer extends TableLoadBalancer {
  private static final String PROP_PREFIX = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey();
  private static final Logger LOG = LoggerFactory.getLogger(HostRegexTableLoadBalancer.class);
  public static final String HOST_BALANCER_PREFIX = PROP_PREFIX + "balancer.host.regex.";
  public static final String HOST_BALANCER_OOB_CHECK_KEY =
      PROP_PREFIX + "balancer.host.regex.oob.period";
  private static final String HOST_BALANCER_OOB_DEFAULT = "5m";
  public static final String HOST_BALANCER_REGEX_USING_IPS_KEY =
      PROP_PREFIX + "balancer.host.regex.is.ip";
  public static final String HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY =
      PROP_PREFIX + "balancer.host.regex.concurrent.migrations";
  private static final int HOST_BALANCER_REGEX_MAX_MIGRATIONS_DEFAULT = 250;
  protected static final String DEFAULT_POOL = "HostTableLoadBalancer.ALL";
  private static final int DEFAULT_OUTSTANDING_MIGRATIONS = 0;
  public static final String HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY =
      PROP_PREFIX + "balancer.host.regex.max.outstanding.migrations";

  /**
   * Extracts the per-table regex configuration from the custom property space, skipping the
   * non-regex control properties (oob period, ip-based flag, migration limits).
   *
   * @return an immutable map of table name to regex string
   */
  private static Map<String,String> getRegexes(PluginEnvironment.Configuration conf) {
    Map<String,String> regexes = new HashMap<>();
    Map<String,String> customProps = conf.getWithPrefix(PROP_PREFIX);
    if (customProps != null && !customProps.isEmpty()) {
      for (Entry<String,String> customProp : customProps.entrySet()) {
        if (customProp.getKey().startsWith(HOST_BALANCER_PREFIX)) {
          // These keys share the prefix but are control settings, not table regexes.
          if (customProp.getKey().equals(HOST_BALANCER_OOB_CHECK_KEY)
              || customProp.getKey().equals(HOST_BALANCER_REGEX_USING_IPS_KEY)
              || customProp.getKey().equals(HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY)
              || customProp.getKey().equals(HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY)) {
            continue;
          }
          String tableName = customProp.getKey().substring(HOST_BALANCER_PREFIX.length());
          String regex = customProp.getValue();
          regexes.put(tableName, regex);
        }
      }
    }
    return Map.copyOf(regexes);
  }

  /**
   * Host Regex Table Load Balance Config. Parsed once per configuration change (derived supplier)
   * and treated as immutable afterwards.
   */
  static class HrtlbConf {
    protected long oobCheckMillis =
        ConfigurationTypeHelper.getTimeInMillis(HOST_BALANCER_OOB_DEFAULT);
    private int maxTServerMigrations = HOST_BALANCER_REGEX_MAX_MIGRATIONS_DEFAULT;
    private int maxOutstandingMigrations = DEFAULT_OUTSTANDING_MIGRATIONS;
    private boolean isIpBasedRegex = false;
    private final Map<String,String> regexes;
    private final Map<String,Pattern> poolNameToRegexPattern;

    HrtlbConf(PluginEnvironment.Configuration conf) {
      // Was a stray System.out.println debug statement; use the class logger instead.
      LOG.trace("Building HostRegexTableLoadBalancer configuration");
      String oobProperty = conf.get(HOST_BALANCER_OOB_CHECK_KEY);
      if (oobProperty != null) {
        oobCheckMillis = ConfigurationTypeHelper.getTimeInMillis(oobProperty);
      }
      String ipBased = conf.get(HOST_BALANCER_REGEX_USING_IPS_KEY);
      if (ipBased != null) {
        isIpBasedRegex = Boolean.parseBoolean(ipBased);
      }
      String migrations = conf.get(HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY);
      if (migrations != null) {
        maxTServerMigrations = Integer.parseInt(migrations);
      }
      String outstanding = conf.get(HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY);
      if (outstanding != null) {
        maxOutstandingMigrations = Integer.parseInt(outstanding);
      }
      this.regexes = getRegexes(conf);
      Map<String,Pattern> poolNameToRegexPatternBuilder = new HashMap<>();
      regexes.forEach((k, v) -> poolNameToRegexPatternBuilder.put(k, Pattern.compile(v)));
      poolNameToRegexPattern = Map.copyOf(poolNameToRegexPatternBuilder);
    }
  }

  private static final Set<TabletId> EMPTY_MIGRATIONS = Collections.emptySet();
  private volatile long lastOOBCheck = System.currentTimeMillis();
  private Map<String,SortedMap<TabletServerId,TServerStatus>> pools = new HashMap<>();
  private final Map<TabletId,TabletMigration> migrationsFromLastPass = new HashMap<>();
  private final Map<TableId,Long> tableToTimeSinceNoMigrations = new HashMap<>();
  private Supplier<HrtlbConf> hrtlbConf;
  private LoadingCache<TableId,Supplier<Map<String,String>>> tablesRegExCache;

  /**
   * Group the set of current tservers by pool name. Tservers that don't match a regex are put into
   * a default pool. This could be expensive in the terms of the amount of time to recompute the
   * groups, so HOST_BALANCER_POOL_RECHECK_KEY should be specified in the terms of minutes, not
   * seconds or less.
   *
   * @param current map of current tservers
   * @return current servers grouped by pool name, if not a match it is put into a default pool.
   */
  protected synchronized Map<String,SortedMap<TabletServerId,TServerStatus>>
      splitCurrentByRegex(SortedMap<TabletServerId,TServerStatus> current) {
    LOG.debug("Performing pool recheck - regrouping tablet servers based on regular expressions");
    Map<String,SortedMap<TabletServerId,TServerStatus>> newPools = new HashMap<>();
    for (Entry<TabletServerId,TServerStatus> e : current.entrySet()) {
      List<String> poolNames = getPoolNamesForHost(e.getKey());
      for (String pool : poolNames) {
        // computeIfAbsent replaces the get/null-check/put dance
        newPools.computeIfAbsent(pool, p -> new TreeMap<>(current.comparator())).put(e.getKey(),
            e.getValue());
      }
    }
    if (newPools.get(DEFAULT_POOL) == null) {
      LOG.warn("Default pool is empty; assigning all tablet servers to the default pool");
      SortedMap<TabletServerId,TServerStatus> dp = new TreeMap<>(current.comparator());
      dp.putAll(current);
      newPools.put(DEFAULT_POOL, dp);
    }
    pools = newPools;
    if (LOG.isTraceEnabled()) {
      // Header moved inside the guard so it is not emitted without the per-pool lines.
      LOG.trace("Pool to TabletServer mapping:");
      for (Entry<String,SortedMap<TabletServerId,TServerStatus>> e : pools.entrySet()) {
        LOG.trace("\tpool: {} -> tservers: {}", e.getKey(), e.getValue().keySet());
      }
    }
    return pools;
  }

  /**
   * Matches host against the regexes and returns the matching pool names
   *
   * @param tabletServerId tablet server host
   * @return pool names, will return default pool if host matches no regex
   */
  protected List<String> getPoolNamesForHost(TabletServerId tabletServerId) {
    final String host = tabletServerId.getHost();
    String test = host;
    if (!hrtlbConf.get().isIpBasedRegex) {
      try {
        test = getNameFromIp(host);
      } catch (UnknownHostException e1) {
        LOG.error("Unable to determine host name for IP: " + host + ", setting to default pool",
            e1);
        return Collections.singletonList(DEFAULT_POOL);
      }
    }
    // renamed from "pools" to avoid shadowing the field of the same name
    List<String> matchingPools = new ArrayList<>();
    for (Entry<String,Pattern> e : hrtlbConf.get().poolNameToRegexPattern.entrySet()) {
      if (e.getValue().matcher(test).matches()) {
        matchingPools.add(e.getKey());
      }
    }
    if (matchingPools.isEmpty()) {
      matchingPools.add(DEFAULT_POOL);
    }
    return matchingPools;
  }

  protected String getNameFromIp(String hostIp) throws UnknownHostException {
    return InetAddress.getByName(hostIp).getHostName();
  }

  // Warns (via the cached per-table regex snapshot) when a table's own config diverges from the
  // system-level config; per-table overrides are ignored by this balancer.
  private void checkTableConfig(TableId tableId) {
    Map<String,String> tableRegexes = tablesRegExCache.get(tableId).get();
    if (!hrtlbConf.get().regexes.equals(tableRegexes)) {
      // Use the existing static logger rather than creating a new one per call.
      LOG.warn("Table id {} has different config than system. The per table config is ignored.",
          tableId);
    }
  }

  /**
   * Matches table name against pool names, returns matching pool name or DEFAULT_POOL.
   *
   * @param tableName name of table
   * @return tablet server pool name (table name or DEFAULT_POOL)
   */
  protected String getPoolNameForTable(String tableName) {
    if (tableName == null) {
      return DEFAULT_POOL;
    }
    return hrtlbConf.get().poolNameToRegexPattern.containsKey(tableName) ? tableName : DEFAULT_POOL;
  }

  @Override
  public String toString() {
    HrtlbConf myConf = hrtlbConf.get();
    ToStringBuilder buf = new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE);
    buf.append("\nTablet Out Of Bounds Check Interval", myConf.oobCheckMillis);
    buf.append("\nMax Tablet Server Migrations", myConf.maxTServerMigrations);
    buf.append("\nRegular Expressions use IPs", myConf.isIpBasedRegex);
    buf.append("\nPools", myConf.poolNameToRegexPattern);
    return buf.toString();
  }

  public Map<String,Pattern> getPoolNameToRegexPattern() {
    return hrtlbConf.get().poolNameToRegexPattern;
  }

  public int getMaxMigrations() {
    return hrtlbConf.get().maxTServerMigrations;
  }

  public int getMaxOutstandingMigrations() {
    return hrtlbConf.get().maxOutstandingMigrations;
  }

  public long getOobCheckMillis() {
    return hrtlbConf.get().oobCheckMillis;
  }

  public boolean isIpBasedRegex() {
    return hrtlbConf.get().isIpBasedRegex;
  }

  @Override
  public void init(BalancerEnvironment balancerEnvironment) {
    super.init(balancerEnvironment);
    this.hrtlbConf = balancerEnvironment.getConfiguration().getDerived(HrtlbConf::new);
    tablesRegExCache =
        Caffeine.newBuilder().expireAfterAccess(1, HOURS).build(key -> balancerEnvironment
            .getConfiguration(key).getDerived(HostRegexTableLoadBalancer::getRegexes));
    LOG.info("{}", this);
  }

  @Override
  public void getAssignments(AssignmentParameters params) {
    Map<String,SortedMap<TabletServerId,TServerStatus>> pools =
        splitCurrentByRegex(params.currentStatus());
    // group the unassigned into tables
    Map<TableId,Map<TabletId,TabletServerId>> groupedUnassigned = new HashMap<>();
    params.unassignedTablets().forEach((ke, lastTserver) -> groupedUnassigned
        .computeIfAbsent(ke.getTable(), k -> new HashMap<>()).put(ke, lastTserver));
    Map<TableId,String> tableIdToTableName = environment.getTableIdMap().entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
    // Send a view of the current servers to the tables tablet balancer
    for (Entry<TableId,Map<TabletId,TabletServerId>> e : groupedUnassigned.entrySet()) {
      Map<TabletId,TabletServerId> newAssignments = new HashMap<>();
      String tableName = tableIdToTableName.get(e.getKey());
      String poolName = getPoolNameForTable(tableName);
      SortedMap<TabletServerId,TServerStatus> currentView = pools.get(poolName);
      if (currentView == null || currentView.isEmpty()) {
        LOG.warn("No tablet servers online for table {}, assigning within default pool", tableName);
        currentView = pools.get(DEFAULT_POOL);
        if (currentView == null) {
          LOG.error(
              "No tablet servers exist in the default pool, unable to assign tablets for table {}",
              tableName);
          continue;
        }
      }
      LOG.debug("Sending {} tablets to balancer for table {} for assignment within tservers {}",
          e.getValue().size(), tableName, currentView.keySet());
      getBalancerForTable(e.getKey())
          .getAssignments(new AssignmentParamsImpl(currentView, e.getValue(), newAssignments));
      newAssignments.forEach(params::addAssignment);
    }
  }

  @Override
  public long balance(BalanceParameters params) {
    long minBalanceTime = 20_000;
    // Iterate over the tables and balance each of them
    Map<String,TableId> tableIdMap = environment.getTableIdMap();
    Map<TableId,String> tableIdToTableName = tableIdMap.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
    tableIdToTableName.keySet().forEach(this::checkTableConfig);
    long now = System.currentTimeMillis();
    HrtlbConf myConf = hrtlbConf.get();
    SortedMap<TabletServerId,TServerStatus> current = params.currentStatus();
    Set<TabletId> migrations = params.currentMigrations();
    List<TabletMigration> migrationsOut = params.migrationsOut();
    Map<String,SortedMap<TabletServerId,TServerStatus>> currentGrouped =
        splitCurrentByRegex(params.currentStatus());
    if ((now - this.lastOOBCheck) > myConf.oobCheckMillis) {
      try {
        // Check to see if a tablet is assigned outside the bounds of the pool. If so, migrate it.
        for (String table : tableIdMap.keySet()) {
          LOG.debug("Checking for out of bounds tablets for table {}", table);
          String tablePoolName = getPoolNameForTable(table);
          for (Entry<TabletServerId,TServerStatus> e : current.entrySet()) {
            // pool names are the same as table names, except in the DEFAULT case.
            // If this table is assigned to a pool for this host, then move on.
            List<String> hostPools = getPoolNamesForHost(e.getKey());
            if (hostPools.contains(tablePoolName)) {
              continue;
            }
            TableId tid = tableIdMap.get(table);
            if (tid == null) {
              LOG.warn("Unable to check for out of bounds tablets for table {},"
                  + " it may have been deleted or renamed.", table);
              continue;
            }
            try {
              List<TabletStatistics> outOfBoundsTablets = getOnlineTabletsForTable(e.getKey(), tid);
              if (outOfBoundsTablets == null) {
                continue;
              }
              for (TabletStatistics ts : outOfBoundsTablets) {
                if (migrations.contains(ts.getTabletId())) {
                  LOG.debug("Migration for out of bounds tablet {} has already been requested",
                      ts.getTabletId());
                  continue;
                }
                String poolName = getPoolNameForTable(table);
                SortedMap<TabletServerId,TServerStatus> currentView = currentGrouped.get(poolName);
                if (currentView != null) {
                  // pick a random in-bounds destination server
                  int skip = RANDOM.get().nextInt(currentView.size());
                  Iterator<TabletServerId> iter = currentView.keySet().iterator();
                  for (int i = 0; i < skip; i++) {
                    iter.next();
                  }
                  TabletServerId nextTS = iter.next();
                  LOG.info(
                      "Tablet {} is currently outside the bounds of the"
                          + " regex, migrating from {} to {}",
                      ts.getTabletId(), e.getKey(), nextTS);
                  migrationsOut.add(new TabletMigration(ts.getTabletId(), e.getKey(), nextTS));
                  if (migrationsOut.size() >= myConf.maxTServerMigrations) {
                    break;
                  }
                } else {
                  LOG.warn("No tablet servers online for pool {}, unable to"
                      + " migrate out of bounds tablets", poolName);
                }
              }
            } catch (AccumuloException | AccumuloSecurityException e1) {
              // Bug fix: previously logged the map entry `e` instead of the caught exception,
              // which dropped the stack trace. Pass the exception as the final argument so
              // SLF4J records it.
              LOG.error("Error in OOB check getting tablets for table {} from server {}", tid,
                  e.getKey().getHost(), e1);
            }
          }
        }
      } finally {
        // this could have taken a while...get a new time
        this.lastOOBCheck = System.currentTimeMillis();
      }
    }
    if (!migrationsOut.isEmpty()) {
      LOG.warn("Not balancing tables due to moving {} out of bounds tablets", migrationsOut.size());
      LOG.info("Migrating out of bounds tablets: {}", migrationsOut);
      return minBalanceTime;
    }
    if (migrations != null && !migrations.isEmpty()) {
      if (migrations.size() >= myConf.maxOutstandingMigrations) {
        LOG.warn("Not balancing tables due to {} outstanding migrations", migrations.size());
        if (LOG.isTraceEnabled()) {
          LOG.trace("Sample up to 10 outstanding migrations: {}", limitTen(migrations));
        }
        return minBalanceTime;
      }
      LOG.debug("Current outstanding migrations of {} being applied", migrations.size());
      if (LOG.isTraceEnabled()) {
        LOG.trace("Sample up to 10 outstanding migrations: {}", limitTen(migrations));
      }
      migrationsFromLastPass.keySet().retainAll(migrations);
      // Simulate the effect of the in-flight migrations on a copy of the current stats so the
      // per-table balancers see the post-migration state.
      SortedMap<TabletServerId,TServerStatusImpl> currentCopy = new TreeMap<>();
      current.forEach((tid, status) -> currentCopy.put(tid, (TServerStatusImpl) status));
      Multimap<TabletServerId,String> serverTableIdCopied = HashMultimap.create();
      for (TabletMigration migration : migrationsFromLastPass.values()) {
        TableStatisticsImpl fromInfo = getTableInfo(currentCopy, serverTableIdCopied,
            migration.getTablet().getTable().canonical(), migration.getOldTabletServer());
        if (fromInfo != null) {
          fromInfo.setOnlineTabletCount(fromInfo.getOnlineTabletCount() - 1);
        }
        TableStatisticsImpl toInfo = getTableInfo(currentCopy, serverTableIdCopied,
            migration.getTablet().getTable().canonical(), migration.getNewTabletServer());
        if (toInfo != null) {
          toInfo.setOnlineTabletCount(toInfo.getOnlineTabletCount() + 1);
        }
      }
      migrations = EMPTY_MIGRATIONS;
    } else {
      migrationsFromLastPass.clear();
    }
    for (TableId tableId : tableIdMap.values()) {
      String tableName = tableIdToTableName.get(tableId);
      String regexTableName = getPoolNameForTable(tableName);
      SortedMap<TabletServerId,TServerStatus> currentView = currentGrouped.get(regexTableName);
      if (currentView == null) {
        LOG.warn("Skipping balance for table {} as no tablet servers are online.", tableName);
        continue;
      }
      ArrayList<TabletMigration> newMigrations = new ArrayList<>();
      getBalancerForTable(tableId)
          .balance(new BalanceParamsImpl(currentView, migrations, newMigrations));
      if (newMigrations.isEmpty()) {
        tableToTimeSinceNoMigrations.remove(tableId);
      } else if (tableToTimeSinceNoMigrations.containsKey(tableId)) {
        if ((now - tableToTimeSinceNoMigrations.get(tableId)) > HOURS.toMillis(1)) {
          LOG.warn("We have been consistently producing migrations for {}: {}", tableName,
              limitTen(newMigrations));
        }
      } else {
        tableToTimeSinceNoMigrations.put(tableId, now);
      }
      migrationsOut.addAll(newMigrations);
      if (migrationsOut.size() >= myConf.maxTServerMigrations) {
        break;
      }
    }
    for (TabletMigration migration : migrationsOut) {
      migrationsFromLastPass.put(migration.getTablet(), migration);
    }
    LOG.info("Migrating tablets for balance: {}", migrationsOut);
    return minBalanceTime;
  }

  protected List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tabletServerId,
      TableId tableId) throws AccumuloSecurityException, AccumuloException {
    return environment.listOnlineTabletsForTable(tabletServerId, tableId);
  }

  /**
   * Get a mutable table info for the specified table and server
   */
  private TableStatisticsImpl getTableInfo(SortedMap<TabletServerId,TServerStatusImpl> currentCopy,
      Multimap<TabletServerId,String> serverTableIdCopied, String tableId, TabletServerId server) {
    TableStatisticsImpl newInfo = null;
    if (currentCopy.containsKey(server)) {
      Map<String,TableStatistics> newTableMap = currentCopy.get(server).getTableMap();
      if (newTableMap != null) {
        newInfo = (TableStatisticsImpl) newTableMap.get(tableId);
        if (newInfo != null) {
          Collection<String> tableIdCopied = serverTableIdCopied.get(server);
          if (tableIdCopied.isEmpty()) {
            // First copy for this server: shallow-copy the table map so mutations below
            // do not affect the shared original.
            newTableMap = new HashMap<>(newTableMap);
            currentCopy.get(server).setTableMap(newTableMap);
          }
          if (!tableIdCopied.contains(tableId)) {
            newInfo = new TableStatisticsImpl(newInfo);
            newTableMap.put(tableId, newInfo);
            tableIdCopied.add(tableId);
          }
        }
      }
    }
    return newInfo;
  }

  // helper to prepare log messages
  private static String limitTen(Collection<?> iterable) {
    return iterable.stream().limit(10).map(String::valueOf)
        .collect(Collectors.joining(", ", "[", "]"));
  }
}
| 9,812 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/util/ThrottledBalancerProblemReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer.util;
import java.util.Collections;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.accumulo.core.data.TabletId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Throttles logging of balancer problems by storing the last log time for each problem and limiting
 * reports to once per minute. The logger is generated internally from the supplied class in order
 * to adhere to the requirements for the SPI package (which prohibits having the Logger class in the
 * API).
 */
public class ThrottledBalancerProblemReporter {
  public interface Problem {
    void report();
  }

  public interface OutstandingMigrationsProblem extends Problem {
    void setMigrations(Set<TabletId> migrations);
  }

  private static final long TIME_BETWEEN_WARNINGS = TimeUnit.SECONDS.toMillis(60);

  // Weak keys: a Problem instance that is no longer referenced by its balancer is
  // automatically dropped from the throttle history.
  private final WeakHashMap<Problem,Long> problemReportTimes = new WeakHashMap<>();
  private final Logger log;

  public ThrottledBalancerProblemReporter(Class<?> loggerClass) {
    log = LoggerFactory.getLogger(loggerClass);
  }

  /**
   * Create a new problem reporter to indicate there are no tablet servers available and balancing
   * could not be performed. Balancers should only create a single instance of this problem, and
   * reuse each time the problem is reported.
   */
  public Problem createNoTabletServersProblem() {
    return () -> log.warn("Not balancing because we don't have any tservers.");
  }

  /**
   * Create a new problem reporter to indicate that balancing could not be performed due to the
   * existence of outstanding migrations. Balancers should only create a single instance of this
   * problem and update its migrations list before each report.
   */
  public OutstandingMigrationsProblem createOutstandingMigrationsProblem() {
    return new OutstandingMigrationsProblem() {
      private Set<TabletId> migrations = Collections.emptySet();

      @Override
      public void setMigrations(Set<TabletId> migrations) {
        this.migrations = migrations;
      }

      @Override
      public void report() {
        log.warn("Not balancing due to {} outstanding migrations.", migrations.size());
        /*
         * TODO ACCUMULO-2938 redact key extents in this output to avoid leaking protected
         * information.
         */
        if (log.isDebugEnabled()) {
          String sample = migrations.stream().limit(10).map(String::valueOf)
              .collect(Collectors.joining(", "));
          log.debug("Sample up to 10 outstanding migrations: {}", sample);
        }
        // Now that we've reported, clear out the migrations list so we don't hold it in memory.
        migrations = Collections.emptySet();
      }
    };
  }

  /**
   * Reports a balance problem. The {@link Problem#report()} will only be called up to once a minute
   * for each problem that is reported repeatedly.
   */
  public void reportProblem(Problem problem) {
    Long lastReport = problemReportTimes.get(problem);
    long reportTime = (lastReport == null) ? -1L : lastReport;
    boolean throttleExpired = (System.currentTimeMillis() - reportTime) > TIME_BETWEEN_WARNINGS;
    if (throttleExpired) {
      problem.report();
      problemReportTimes.put(problem, System.currentTimeMillis());
    }
  }

  /**
   * Clears reported problems so that a problem report will be logged immediately the next time
   * {@link #reportProblem(Problem)} is invoked.
   */
  public void clearProblemReportTimes() {
    problemReportTimes.clear();
  }
}
| 9,813 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/data/TabletMigration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer.data;
import static java.util.Objects.requireNonNull;
import java.util.Objects;
import org.apache.accumulo.core.data.TabletId;
/**
 * Describes the planned movement of a single tablet from one tablet server to another.
 *
 * @since 2.1.0
 */
public class TabletMigration {
  private final TabletId tabletId;
  private final TabletServerId oldTabletServer;
  private final TabletServerId newTabletServer;

  public TabletMigration(TabletId tabletId, TabletServerId oldTabletServer,
      TabletServerId newTabletServer) {
    this.tabletId = requireNonNull(tabletId);
    this.oldTabletServer = requireNonNull(oldTabletServer);
    this.newTabletServer = requireNonNull(newTabletServer);
  }

  /** Returns the tablet being migrated. */
  public TabletId getTablet() {
    return tabletId;
  }

  /** Returns the server the tablet is migrating away from. */
  public TabletServerId getOldTabletServer() {
    return oldTabletServer;
  }

  /** Returns the server the tablet is migrating to. */
  public TabletServerId getNewTabletServer() {
    return newTabletServer;
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj == null || obj.getClass() != getClass()) {
      return false;
    }
    TabletMigration other = (TabletMigration) obj;
    return Objects.equals(tabletId, other.tabletId)
        && Objects.equals(oldTabletServer, other.oldTabletServer)
        && Objects.equals(newTabletServer, other.newTabletServer);
  }

  @Override
  public int hashCode() {
    return Objects.hash(tabletId, oldTabletServer, newTabletServer);
  }

  @Override
  public String toString() {
    return new StringBuilder().append(tabletId).append(": ").append(oldTabletServer)
        .append(" -> ").append(newTabletServer).toString();
  }
}
| 9,814 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/data/TabletServerId.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer.data;
/**
* @since 2.1.0
*/
public interface TabletServerId extends Comparable<TabletServerId> {

  /** Returns the host name or address of the tablet server. */
  String getHost();

  /** Returns the port the tablet server is listening on. */
  int getPort();

  /**
   * Returns the server's session identifier; presumably the ZooKeeper session string used to
   * distinguish restarts of a server on the same host:port — TODO confirm against implementations.
   */
  String getSession();
}
| 9,815 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/data/TableStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer.data;
/**
* @since 2.1.0
*/
public interface TableStatistics extends Comparable<TableStatistics> {

  /** Returns the total number of records (entries) in the table. */
  long getRecords();

  /** Returns the number of records currently held in memory (not yet minor compacted). */
  long getRecordsInMemory();

  /** Returns the total number of tablets in the table. */
  int getTabletCount();

  /** Returns the number of tablets currently online. */
  int getOnlineTabletCount();

  // NOTE(review): units for the rate methods below are not specified here; presumably
  // entries/second and bytes/second respectively — confirm against the implementation.

  /** Returns the ingest rate for the table. */
  double getIngestRate();

  /** Returns the ingest byte rate for the table. */
  double getIngestByteRate();

  /** Returns the query rate for the table. */
  double getQueryRate();

  /** Returns the query byte rate for the table. */
  double getQueryByteRate();

  /** Returns the scan rate for the table. */
  double getScanRate();
}
| 9,816 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/data/TabletStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer.data;
import org.apache.accumulo.core.data.TabletId;
/**
* @since 2.1.0
*/
public interface TabletStatistics extends Comparable<TabletStatistics> {

  /** Returns the identifier of the tablet these statistics describe. */
  TabletId getTabletId();

  /** Returns the number of entries in the tablet. */
  long getNumEntries();

  /**
   * Returns the creation time of the split that produced this tablet; units are not specified
   * here — presumably epoch milliseconds, TODO confirm.
   */
  long getSplitCreationTime();

  /** Returns the ingest rate for the tablet (units not specified here). */
  double getIngestRate();

  /** Returns the query rate for the tablet (units not specified here). */
  double getQueryRate();
}
| 9,817 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/balancer/data/TServerStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer.data;
import java.util.Map;
/**
* @since 2.1.0
*/
public interface TServerStatus extends Comparable<TServerStatus> {

  /** Returns per-table statistics keyed by table name. */
  Map<String,TableStatistics> getTableMap();

  /** Returns the time since the server was last contacted (units not specified here). */
  long getLastContact();

  /** Returns the server's name. */
  String getName();

  /** Returns the operating-system load reported by the server. */
  double getOsLoad();

  /** Returns the hold time (units not specified here). */
  long getHoldTime();

  /** Returns the number of lookups performed. */
  long getLookups();

  /** Returns the number of index cache hits. */
  long getIndexCacheHits();

  /** Returns the number of index cache requests. */
  long getIndexCacheRequests();

  /** Returns the number of data cache hits. */
  long getDataCacheHits();

  /** Returns the number of data cache requests. */
  long getDataCacheRequests();

  /** Returns the number of flushes performed. */
  long getFlushes();

  /** Returns the number of syncs performed. */
  long getSyncs();

  /** Returns the server's software version string. */
  String getVersion();

  /** Returns the server's response time (units not specified here). */
  long getResponseTime();
}
| 9,818 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/fs/VolumeChooser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import java.util.Set;
import org.apache.accumulo.core.conf.Property;
/**
* Helper used to select a volume from a set of Volume URIs.
* <p>
* Volumes will be selected based on defined option criteria. Note: Implementations must be
* threadsafe.<br>
* VolumeChooser.equals will be used for internal caching.<br>
* <p>
* Property Details:<br>
* {@link Property#GENERAL_ARBITRARY_PROP_PREFIX} and {@link Property#TABLE_ARBITRARY_PROP_PREFIX}
* can be used to define user-specific properties to ensure separation from Accumulo System
* defaults.<br>
*
* Note: The default VolumeChooser implementation is set by {@link Property#GENERAL_VOLUME_CHOOSER}.
*
* @since 2.1.0
*/
public interface VolumeChooser {

  /**
   * Choose a volume from the provided options.
   *
   * @param env the server environment provided by the calling framework
   * @param options the list of volumes to choose from
   * @return a volume from the list of volume options
   */
  String choose(VolumeChooserEnvironment env, Set<String> options);

  /**
   * Return the subset of all possible volumes that could be chosen across all invocations of
   * {@link #choose(VolumeChooserEnvironment, Set)}.<br>
   *
   * This is currently used to determine if the chosen volumes can support the required filesystem
   * operations for write ahead logs.<br>
   *
   * There may be other use cases in the future.
   *
   * @param env the server environment provided by the calling framework
   * @param options the full set of candidate volumes
   * @return the subset of {@code options} this chooser could ever return from
   *         {@link #choose(VolumeChooserEnvironment, Set)}
   */
  Set<String> choosable(VolumeChooserEnvironment env, Set<String> options);
}
| 9,819 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/fs/PreferredVolumeChooser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope;
import org.apache.accumulo.core.volume.Volume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link RandomVolumeChooser} that selects preferred volumes to use from the provided volume
* options. Preferred Volumes are set on either the per-table, per-scope, or default configuration
* level. Configuration details are overridden based on the top-down "Default","Site","System"
* scopes.
* <p>
* Defaults to selecting a volume at random from the provided volume options.
*
* <p>
* Property Details:
* <p>
* Preferred volumes can be set on a per-table basis via the custom property
* {@code volume.preferred}.
*
* <p>
* This property should contain a comma separated list of {@link Volume} URIs. Since this is a
* custom property, it can be accessed under the prefix {@code table.custom}.<br>
*
* The {@code volume.preferred} property can be set at various configuration levels depending on the
* scope. Note: Both the property name and the format of its value are specific to this particular
* implementation.
* <table border="1">
* <caption>Scope Property Lookups and Defaults locations</caption>
* <tr>
* <th>Scope</th>
* <th>Property Value Lookup</th>
* <th>Default Property Lookup</th>
* </tr>
* <tr>
* <td>{@code Scope.DEFAULT}</td>
* <td>{@code general.custom.volume.preferred.default}</td>
* <td>Throws RuntimeException if not set</td>
* </tr>
* <tr>
* <td>{@code Scope.INIT}</td>
* <td>{@code general.custom.volume.preferred.init}</td>
* <td>{@code general.custom.volume.preferred.default}</td>
* </tr>
* <tr>
* <td>{@code Scope.LOGGER}</td>
* <td>{@code general.custom.volume.preferred.logger}</td>
* <td>{@code general.custom.volume.preferred.default}</td>
* </tr>
* <tr>
* <td>{@code Scope.TABLE}</td>
* <td>{@code table.custom.volume.preferred}</td>
* <td>{@code general.custom.volume.preferred.default}</td>
* </tr>
* </table>
* <br>
* <p>
* Examples of expected usage:
* <ul>
* <li>Separate metadata table and write ahead logs from general data location.
*
* <pre>
* <code>
* // Set list of volumes
* instance.volumes=hdfs://namenode_A/accumulo,hdfs://namenode_B/general
* // Enable the preferred volume chooser
* general.volume.chooser=org.apache.accumulo.core.spi.fs.PreferredVolumeChooser
* // Set default preferred volume
* general.custom.volume.preferred.default=hdfs://namenode_B/general
* // Set write-ahead log preferred volume
* general.custom.volume.preferred.logger=hdfs://namenode_A/accumulo
* // Initialize and start accumulo
* // Once accumulo is up, open the shell and configure the metadata table to use a preferred volume
* config -t accumulo.metadata -s table.custom.volume.preferred=hdfs://namenode_A/accumulo
* </code>
* </pre>
*
* </li>
* <li>Allow general data to use all volumes, but limit a specific table to a preferred volume
*
* <pre>
* <code>
* // Set list of volumes
* instance.volumes=hdfs://namenode/accumulo,hdfs://namenode/accumulo_select
* // Enable the preferred volume chooser
* general.volume.chooser=org.apache.accumulo.core.spi.fs.PreferredVolumeChooser
* // Set default preferred volumes
* general.custom.volume.preferred.default=hdfs://namenode/accumulo,hdfs://namenode/accumulo_select
* // Initialize and start accumulo
* // Once accumulo is up, open the shell and configure the metadata table to use a preferred volume
* config -t accumulo.metadata -s table.custom.volume.preferred=hdfs://namenode/accumulo_select
* </code>
* </pre>
*
* </li>
* </ul>
*
* @since 2.1.0
*/
public class PreferredVolumeChooser extends RandomVolumeChooser {
  private static final Logger log = LoggerFactory.getLogger(PreferredVolumeChooser.class);

  // suffix of the per-table custom property, i.e. table.custom.volume.preferred
  private static final String TABLE_CUSTOM_SUFFIX = "volume.preferred";

  // Builds the per-scope custom property suffix, e.g. "volume.preferred.logger".
  // ('final' is redundant on a private static method, so it is omitted.)
  // NOTE(review): toLowerCase() uses the default locale; Scope names are ASCII, but a
  // Turkish-locale JVM would map INIT to "ınıt" — consider toLowerCase(Locale.ROOT).
  private static String getCustomPropertySuffix(Scope scope) {
    return "volume.preferred." + scope.name().toLowerCase();
  }

  // fallback property suffix used when no scope/table-specific preference is configured
  private static final String DEFAULT_SCOPED_PREFERRED_VOLUMES =
      getCustomPropertySuffix(Scope.DEFAULT);

  /**
   * Randomly chooses one of the configured preferred volumes that is also present in
   * {@code options}.
   */
  @Override
  public String choose(VolumeChooserEnvironment env, Set<String> options) {
    log.trace("{}.choose", getClass().getSimpleName());
    // Randomly choose the volume from the preferred volumes
    String choice = super.choose(env, getPreferredVolumes(env, options));
    log.trace("Choice = {}", choice);
    return choice;
  }

  /**
   *
   * Returns the subset of volumes that match the defined preferred volumes value
   *
   * @param env the server environment provided by the calling framework
   * @param options the subset of volumes to choose from
   * @return an array of preferred volumes that are a subset of {@link Property#INSTANCE_VOLUMES}
   */
  @Override
  public Set<String> choosable(VolumeChooserEnvironment env, Set<String> options) {
    return getPreferredVolumes(env, options);
  }

  // Dispatches to the table-specific or scope-specific lookup.
  // visible (not private) for testing
  Set<String> getPreferredVolumes(VolumeChooserEnvironment env, Set<String> options) {
    if (env.getChooserScope() == Scope.TABLE) {
      return getPreferredVolumesForTable(env, options);
    }
    return getPreferredVolumesForScope(env, options);
  }

  // Looks up table.custom.volume.preferred for the env's table, falling back to the
  // global default-scope property; throws if neither is set.
  private Set<String> getPreferredVolumesForTable(VolumeChooserEnvironment env,
      Set<String> options) {
    log.trace("Looking up property {} + for Table id: {}", TABLE_CUSTOM_SUFFIX, env.getTable());

    String preferredVolumes = env.getServiceEnv().getConfiguration(env.getTable().orElseThrow())
        .getTableCustom(TABLE_CUSTOM_SUFFIX);

    // fall back to global default scope, so setting only one default is necessary, rather than a
    // separate default for TABLE scope than other scopes
    if (preferredVolumes == null || preferredVolumes.isEmpty()) {
      preferredVolumes =
          env.getServiceEnv().getConfiguration().getCustom(DEFAULT_SCOPED_PREFERRED_VOLUMES);
    }

    // throw an error if volumes not specified or empty
    if (preferredVolumes == null || preferredVolumes.isEmpty()) {
      String msg = "Property " + TABLE_CUSTOM_SUFFIX + " or " + DEFAULT_SCOPED_PREFERRED_VOLUMES
          + " must be a subset of " + options + " to use the " + getClass().getSimpleName();
      throw new IllegalArgumentException(msg);
    }

    return parsePreferred(TABLE_CUSTOM_SUFFIX, preferredVolumes, options);
  }

  // Looks up the scope-specific property, falling back to the global default-scope property
  // (unless already at DEFAULT scope); throws if neither is set.
  private Set<String> getPreferredVolumesForScope(VolumeChooserEnvironment env,
      Set<String> options) {
    Scope scope = env.getChooserScope();
    String property = getCustomPropertySuffix(scope);
    log.trace("Looking up property {} for scope: {}", property, scope);

    String preferredVolumes = env.getServiceEnv().getConfiguration().getCustom(property);

    // fall back to global default scope if this scope isn't configured (and not already default
    // scope)
    if ((preferredVolumes == null || preferredVolumes.isEmpty()) && scope != Scope.DEFAULT) {
      log.debug("{} not found; using {}", property, DEFAULT_SCOPED_PREFERRED_VOLUMES);
      preferredVolumes =
          env.getServiceEnv().getConfiguration().getCustom(DEFAULT_SCOPED_PREFERRED_VOLUMES);

      // only if the custom property is not set to we fall back to the default scoped preferred
      // volumes
      if (preferredVolumes == null || preferredVolumes.isEmpty()) {
        String msg = "Property " + property + " or " + DEFAULT_SCOPED_PREFERRED_VOLUMES
            + " must be a subset of " + options + " to use the " + getClass().getSimpleName();
        throw new IllegalArgumentException(msg);
      }
      property = DEFAULT_SCOPED_PREFERRED_VOLUMES;
    }

    return parsePreferred(property, preferredVolumes, options);
  }

  // Splits the comma-separated property value and validates that at least one parsed
  // volume is among the provided options.
  private Set<String> parsePreferred(String property, String preferredVolumes,
      Set<String> options) {
    log.trace("Found {} = {}", property, preferredVolumes);

    Set<String> preferred =
        Arrays.stream(preferredVolumes.split(",")).map(String::trim).collect(Collectors.toSet());

    if (preferred.isEmpty()) {
      String msg = "No volumes could be parsed from '" + property + "', which had a value of '"
          + preferredVolumes + "'";
      throw new IllegalArgumentException(msg);
    }

    // preferred volumes should also exist in the original options (typically, from
    // instance.volumes)
    if (Collections.disjoint(preferred, options)) {
      String msg = "Some volumes in " + preferred + " are not valid volumes from " + options;
      throw new IllegalArgumentException(msg);
    }

    return preferred;
  }
}
| 9,820 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/fs/RandomVolumeChooser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.util.Set;
/**
* A {@link VolumeChooser} that selects a volume at random from the list of provided volumes.
*
* @since 2.1.0
*/
public class RandomVolumeChooser implements VolumeChooser {

  /**
   * Selects a volume at random from the provided set of volumes. The environment scope is not
   * utilized.
   */
  @Override
  public String choose(VolumeChooserEnvironment env, Set<String> options) {
    // Draw a uniformly random position, then walk the set's iteration order to it. This
    // returns the same element toArray() would have placed at that index for this draw.
    // An empty set still throws from nextInt(0), as before.
    int remaining = RANDOM.get().nextInt(options.size());
    for (String candidate : options) {
      if (remaining == 0) {
        return candidate;
      }
      remaining--;
    }
    // nextInt(size) is always < size, so the loop returns before exhausting the set
    throw new IllegalStateException("unreachable");
  }

  /**
   * @return same set of volume options that were originally provided.
   */
  @Override
  public Set<String> choosable(VolumeChooserEnvironment env, Set<String> options) {
    return options;
  }
}
| 9,821 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.IOException;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
/**
* A {@link PreferredVolumeChooser} that takes remaining HDFS space into account when making a
* volume choice rather than a simpler round-robin. The list of volumes to use can be limited using
* the same properties as {@link PreferredVolumeChooser}
*
* @since 2.1.0
*/
public class SpaceAwareVolumeChooser extends PreferredVolumeChooser {

  // custom property name for the weight-recompute interval, in milliseconds
  public static final String RECOMPUTE_INTERVAL = "spaceaware.volume.chooser.recompute.interval";

  // Default time to wait in ms. Defaults to 5 min
  private long defaultComputationCacheDuration = 300000;
  // lazily initialized in getCache(); maps a preferred-volume set to its weighted sampler
  private LoadingCache<Set<String>,WeightedRandomCollection> choiceCache = null;

  private static final Logger log = LoggerFactory.getLogger(SpaceAwareVolumeChooser.class);

  private Configuration conf = new Configuration();

  /**
   * Returns the fraction (0..1) of remaining capacity on the filesystem backing the given URI.
   *
   * @param uri a volume URI
   * @throws IOException if the filesystem status cannot be obtained
   */
  protected double getFreeSpace(String uri) throws IOException {
    FileSystem pathFs = new Path(uri).getFileSystem(conf);
    FsStatus optionStatus = pathFs.getStatus();
    return ((double) optionStatus.getRemaining() / optionStatus.getCapacity());
  }

  /**
   * Chooses among the preferred volumes, weighted by each volume's fraction of free space; the
   * weights are cached and recomputed per {@link #RECOMPUTE_INTERVAL}.
   */
  @Override
  public String choose(VolumeChooserEnvironment env, Set<String> options) {
    return getCache(env).get(getPreferredVolumes(env, options)).next();
  }

  // Lazily builds the cache so the recompute interval can be read from the environment's
  // configuration; synchronized to guard the one-time initialization.
  private synchronized LoadingCache<Set<String>,WeightedRandomCollection>
      getCache(VolumeChooserEnvironment env) {

    if (choiceCache == null) {
      String propertyValue = env.getServiceEnv().getConfiguration().getCustom(RECOMPUTE_INTERVAL);

      long computationCacheDuration = StringUtils.isNotBlank(propertyValue)
          ? Long.parseLong(propertyValue) : defaultComputationCacheDuration;

      choiceCache = Caffeine.newBuilder().expireAfterWrite(computationCacheDuration, MILLISECONDS)
          .build(key -> new WeightedRandomCollection(key, env));
    }

    return choiceCache;
  }

  // Weighted random sampler over volumes, keyed by cumulative weight in a sorted map.
  private class WeightedRandomCollection {
    private final NavigableMap<Double,String> map = new TreeMap<>();
    private double total = 0;

    private WeightedRandomCollection(Set<String> options, VolumeChooserEnvironment env) {
      if (options.isEmpty()) {
        throw new IllegalStateException("Options was empty! No valid volumes to choose from.");
      }

      // Compute percentage space available on each volume
      for (String option : options) {
        try {
          double percentFree = getFreeSpace(option);
          add(percentFree, option);
        } catch (IOException e) {
          // best effort: a volume whose status can't be read is simply excluded from the map
          log.error("Unable to get file system status for {}", option, e);
        }
      }

      if (map.isEmpty()) {
        throw new IllegalStateException(
            "Weighted options was empty! Could indicate an issue getting file system status or "
                + "no free space on any volume");
      }
    }

    public WeightedRandomCollection add(double weight, String result) {
      if (weight <= 0) {
        // a non-positive weight would corrupt the cumulative map, so skip the entry
        log.info("Weight for {} was not positive ({}); not adding", result, weight);
        return this;
      }
      total += weight;
      map.put(total, result);
      return this;
    }

    public String next() {
      // nextDouble() is in [0,1), so value < total and higherEntry() never returns null
      double value = RANDOM.get().nextDouble() * total;
      return map.higherEntry(value).getValue();
    }
  }
}
| 9,822 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/fs/DelegatingChooser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link VolumeChooser} that delegates to another volume chooser based on other properties:
* table.custom.volume.chooser for tables, and general.custom.volume.chooser.scoped for scopes.
* general.custom.volume.chooser.{scope} can override the system-wide setting for
* general.custom.volume.chooser.scoped. At the time this was written, the only known scope was
* "logger".
*
* @since 2.1.0
*/
public class DelegatingChooser implements VolumeChooser {
  private static final Logger log = LoggerFactory.getLogger(DelegatingChooser.class);

  /* Track VolumeChooser instances so they can keep state. */
  private final ConcurrentHashMap<TableId,VolumeChooser> tableSpecificChooserCache =
      new ConcurrentHashMap<>();
  private final ConcurrentHashMap<Scope,VolumeChooser> scopeSpecificChooserCache =
      new ConcurrentHashMap<>();

  // suffix of the per-table custom property, i.e. table.custom.volume.chooser
  private static final String TABLE_CUSTOM_SUFFIX = "volume.chooser";

  // Builds the per-scope custom property suffix, e.g. "volume.chooser.logger".
  // ('final' is redundant on a private static method, so it is omitted.)
  // NOTE(review): toLowerCase() uses the default locale; Scope names are ASCII, but a
  // Turkish-locale JVM would map INIT to "ınıt" — consider toLowerCase(Locale.ROOT).
  private static String getCustomPropertySuffix(Scope scope) {
    return "volume.chooser." + scope.name().toLowerCase();
  }

  // fallback property suffix used when no scope/table-specific chooser is configured
  private static final String DEFAULT_SCOPED_VOLUME_CHOOSER =
      getCustomPropertySuffix(Scope.DEFAULT);

  /** Delegates the choice to the chooser configured for the env's table or scope. */
  @Override
  public String choose(VolumeChooserEnvironment env, Set<String> options) {
    log.trace("{}.choose", getClass().getSimpleName());
    return getDelegateChooser(env).choose(env, options);
  }

  /** Delegates to the configured chooser's {@code choosable}. */
  @Override
  public Set<String> choosable(VolumeChooserEnvironment env, Set<String> options) {
    return getDelegateChooser(env).choosable(env, options);
  }

  // Dispatches to the table-specific or scope-specific chooser lookup.
  // visible (not private) for testing
  VolumeChooser getDelegateChooser(VolumeChooserEnvironment env) {
    if (env.getChooserScope() == Scope.TABLE) {
      return getVolumeChooserForTable(env);
    }
    return getVolumeChooserForScope(env);
  }

  // Resolves the chooser class from table.custom.volume.chooser, falling back to the
  // global default-scope property; throws if neither is set.
  private VolumeChooser getVolumeChooserForTable(VolumeChooserEnvironment env) {
    log.trace("Looking up property {} for table id: {}", TABLE_CUSTOM_SUFFIX, env.getTable());

    String clazz = env.getServiceEnv().getConfiguration(env.getTable().orElseThrow())
        .getTableCustom(TABLE_CUSTOM_SUFFIX);

    // fall back to global default scope, so setting only one default is necessary, rather than a
    // separate default for TABLE scope than other scopes
    if (clazz == null || clazz.isEmpty()) {
      clazz = env.getServiceEnv().getConfiguration().getCustom(DEFAULT_SCOPED_VOLUME_CHOOSER);
    }

    if (clazz == null || clazz.isEmpty()) {
      String msg = "Property " + Property.TABLE_ARBITRARY_PROP_PREFIX + TABLE_CUSTOM_SUFFIX + " or "
          + Property.GENERAL_ARBITRARY_PROP_PREFIX + DEFAULT_SCOPED_VOLUME_CHOOSER
          + " must be a valid " + VolumeChooser.class.getSimpleName() + " to use the "
          + getClass().getSimpleName();
      throw new IllegalStateException(msg);
    }

    return createVolumeChooser(env, clazz, TABLE_CUSTOM_SUFFIX, env.getTable().orElseThrow(),
        tableSpecificChooserCache);
  }

  // Resolves the chooser class for the env's scope, falling back to the global default-scope
  // property (unless already at DEFAULT scope); throws if neither is set.
  private VolumeChooser getVolumeChooserForScope(VolumeChooserEnvironment env) {
    Scope scope = env.getChooserScope();
    String property = getCustomPropertySuffix(scope);
    log.trace("Looking up property {} for scope: {}", property, scope);

    String clazz = env.getServiceEnv().getConfiguration().getCustom(property);

    // fall back to global default scope if this scope isn't configured (and not already default
    // scope)
    if ((clazz == null || clazz.isEmpty()) && scope != Scope.DEFAULT) {
      log.debug("{} not found; using {}", Property.TABLE_ARBITRARY_PROP_PREFIX + property,
          Property.GENERAL_ARBITRARY_PROP_PREFIX + DEFAULT_SCOPED_VOLUME_CHOOSER);
      clazz = env.getServiceEnv().getConfiguration().getCustom(DEFAULT_SCOPED_VOLUME_CHOOSER);

      if (clazz == null || clazz.isEmpty()) {
        String msg = "Property " + Property.TABLE_ARBITRARY_PROP_PREFIX + property + " or "
            + Property.GENERAL_ARBITRARY_PROP_PREFIX + DEFAULT_SCOPED_VOLUME_CHOOSER
            + " must be a valid " + VolumeChooser.class.getSimpleName() + " to use the "
            + getClass().getSimpleName();
        throw new IllegalStateException(msg);
      }

      property = DEFAULT_SCOPED_VOLUME_CHOOSER;
    }

    return createVolumeChooser(env, clazz, property, scope, scopeSpecificChooserCache);
  }

  /**
   * Create a volume chooser, using the cached version if any. This will replace the cached version
   * if the class name has changed.
   *
   * @param clazz The volume chooser class name
   * @param property The property from which it was obtained
   * @param key The key to user in the cache
   * @param cache The cache
   * @return The volume chooser instance
   */
  private <T> VolumeChooser createVolumeChooser(VolumeChooserEnvironment env, String clazz,
      String property, T key, ConcurrentHashMap<T,VolumeChooser> cache) {
    final String className = clazz.trim();

    // create a new instance, unless another thread beat us with one of the same class name, then
    // use theirs
    return cache.compute(key, (k, previousChooser) -> {
      if (previousChooser != null && previousChooser.getClass().getName().equals(className)) {
        // no change; return the old one
        return previousChooser;
      } else if (previousChooser == null) {
        // TODO stricter definition of when the updated property is used, ref ACCUMULO-3412
        // don't log change if this is the first use
        log.trace("Change detected for {} for {}", property, key);
      }

      try {
        if (key instanceof TableId) {
          TableId tableId = (TableId) key;
          return env.getServiceEnv().instantiate(tableId, className, VolumeChooser.class);
        } else {
          return env.getServiceEnv().instantiate(className, VolumeChooser.class);
        }
      } catch (ReflectiveOperationException e) {
        String msg = "Failed to create instance for " + key + " configured to use " + className
            + " via " + property;
        throw new IllegalStateException(msg, e);
      }
    });
  }
}
| 9,823 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/spi/fs/VolumeChooserEnvironment.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import java.util.Optional;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.hadoop.io.Text;
/**
* @since 2.1.0
*/
public interface VolumeChooserEnvironment {

  /**
   * A scope for the volume chooser environment; a TABLE scope should be accompanied by a tableId.
   *
   * @since 2.1.0
   */
  public static enum Scope {
    DEFAULT, TABLE, INIT, LOGGER
  }

  /**
   * The end row of the tablet for which a volume is being chosen. Only call this when the scope is
   * TABLE
   *
   * @since 2.0.0
   */
  public Text getEndRow();

  /**
   * Returns the id of the table a volume is being chosen for; presumably present only when
   * {@link #getChooserScope()} is {@code TABLE} — confirm against callers.
   */
  public Optional<TableId> getTable();

  /** Returns the scope in which the volume choice is being made. */
  public Scope getChooserScope();

  /** Returns the service-level environment, e.g. for configuration access. */
  public ServiceEnvironment getServiceEnv();
}
| 9,824 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.client.ScannerBase.ConsistencyLevel;
import org.apache.accumulo.core.client.TableDeletedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TableOfflineException;
import org.apache.accumulo.core.clientImpl.ThriftScanner.ScanState;
import org.apache.accumulo.core.clientImpl.ThriftScanner.ScanTimedOutException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyValue;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
 * Client-side iterator over the entries of a scan. Batches of key/values are fetched from tablet
 * servers via {@link ThriftScanner#scan}; once {@code readaheadThreshold} non-empty batches have
 * been consumed, the next batch is prefetched on a background thread so the caller overlaps
 * processing with fetching. Not thread safe for concurrent iteration; close() may be called from
 * another thread and is serialized with batch reads via the scanState lock.
 */
public class ScannerIterator implements Iterator<Entry<Key,Value>> {
  // scanner options
  private long timeOut;
  // scanner state
  // iterator over the most recently fetched batch; null before the first fetch
  private Iterator<KeyValue> iter;
  private final ScanState scanState;
  private ScannerOptions options;
  // in-flight background fetch of the next batch; null when no read-ahead is pending
  private Future<List<KeyValue>> readAheadOperation;
  private boolean finished = false;
  // number of non-empty batches consumed so far; drives the read-ahead decision
  private long batchCount = 0;
  // number of batches to read synchronously before enabling read-ahead (0 = immediately)
  private long readaheadThreshold;
  // notified when batches are read and when iteration finishes, so the owning scanner can
  // track/reap sessions
  private ScannerImpl.Reporter reporter;
  private final ClientContext context;
  // set under the scanState lock in close(); readBatch() checks it to refuse reads after close
  private AtomicBoolean closed = new AtomicBoolean(false);

  /**
   * Creates the iterator and, when readaheadThreshold is 0, immediately starts fetching the first
   * batch in the background.
   */
  ScannerIterator(ClientContext context, TableId tableId, Authorizations authorizations,
      Range range, int size, long timeOut, ScannerOptions options, boolean isolated,
      long readaheadThreshold, ScannerImpl.Reporter reporter) {
    this.context = context;
    this.timeOut = timeOut;
    this.readaheadThreshold = readaheadThreshold;
    this.options = new ScannerOptions(options);
    this.reporter = reporter;
    // tighten the range to the first/last fetched column to avoid scanning irrelevant columns
    if (!this.options.fetchedColumns.isEmpty()) {
      range = range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
    }
    scanState = new ScanState(context, tableId, authorizations, new Range(range),
        options.fetchedColumns, size, options.serverSideIteratorList,
        options.serverSideIteratorOptions, isolated, readaheadThreshold,
        options.getSamplerConfiguration(), options.batchTimeout, options.classLoaderContext,
        options.executionHints, options.getConsistencyLevel() == ConsistencyLevel.EVENTUAL);
    // If we want to start readahead immediately, don't wait for hasNext to be called
    if (readaheadThreshold == 0L) {
      initiateReadAhead();
    }
    iter = null;
  }

  @Override
  public boolean hasNext() {
    if (finished) {
      return false;
    }
    // still entries left in the current batch
    if (iter != null && iter.hasNext()) {
      return true;
    }
    iter = getNextBatch().iterator();
    if (!iter.hasNext()) {
      // an empty batch from getNextBatch() means the scan is exhausted
      finished = true;
      reporter.finished(this);
      return false;
    }
    return true;
  }

  @Override
  public Entry<Key,Value> next() {
    if (hasNext()) {
      return iter.next();
    }
    throw new NoSuchElementException();
  }

  /** Closes the server-side scan session asynchronously; does not block the caller. */
  void close() {
    // run actual close operation in the background so this does not block.
    context.executeCleanupTask(() -> {
      synchronized (scanState) {
        // this is synchronized so its mutually exclusive with readBatch()
        try {
          closed.set(true);
          ThriftScanner.close(scanState);
        } catch (Exception e) {
          LoggerFactory.getLogger(ScannerIterator.class)
              .debug("Exception when closing scan session", e);
        }
      }
    });
  }

  /** Submits a background task to fetch the next batch; at most one may be in flight. */
  private void initiateReadAhead() {
    Preconditions.checkState(readAheadOperation == null);
    readAheadOperation = context.submitScannerReadAheadTask(this::readBatch);
  }

  /**
   * Fetches the next non-empty batch from the server, or an empty list when the scan is done.
   * Runs either inline (first batches) or on a read-ahead thread.
   */
  private List<KeyValue> readBatch() throws ScanTimedOutException, AccumuloException,
      AccumuloSecurityException, TableNotFoundException {
    List<KeyValue> batch;
    do {
      synchronized (scanState) {
        // this is synchronized so its mutually exclusive with closing
        Preconditions.checkState(!closed.get(), "Scanner was closed");
        batch = ThriftScanner.scan(scanState.context, scanState, timeOut);
      }
      // null means scan finished; an empty (non-null) batch means keep polling
    } while (batch != null && batch.isEmpty());
    if (batch != null) {
      reporter.readBatch(this);
    }
    return batch == null ? Collections.emptyList() : batch;
  }

  /**
   * Returns the next batch, either from a completed read-ahead task or by reading inline, and
   * kicks off the next read-ahead once past the threshold.
   */
  private List<KeyValue> getNextBatch() {
    List<KeyValue> nextBatch;
    try {
      if (readAheadOperation == null) {
        // no read ahead run, fetch the next batch right now
        nextBatch = readBatch();
      } else {
        nextBatch = readAheadOperation.get();
        readAheadOperation = null;
      }
    } catch (ExecutionException ee) {
      // rethrows recognized causes with their original type; see wrapExecutionException
      wrapExecutionException(ee);
      throw new IllegalStateException(ee);
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException
        | ScanTimedOutException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
    if (!nextBatch.isEmpty()) {
      batchCount++;
      if (batchCount > readaheadThreshold) {
        // start a thread to read the next batch
        initiateReadAhead();
      }
    }
    return nextBatch;
  }

  private void wrapExecutionException(ExecutionException ee) {
    // Need preserve the type of exception that was the cause because some code depends on it.
    // However the cause is an exception that occurred in a background thread, so throwing it would
    // lose the stack trace for the user thread calling the scanner. Wrapping the exception with the
    // same type preserves the type and stack traces (foreground and background thread traces) that
    // are critical for debugging.
    if (ee.getCause() instanceof IsolationException) {
      throw new IsolationException(ee);
    }
    if (ee.getCause() instanceof TableDeletedException) {
      TableDeletedException cause = (TableDeletedException) ee.getCause();
      throw new TableDeletedException(cause.getTableId(), cause);
    }
    if (ee.getCause() instanceof TableOfflineException) {
      throw new TableOfflineException(ee);
    }
    if (ee.getCause() instanceof SampleNotPresentException) {
      throw new SampleNotPresentException(ee.getCause().getMessage(), ee);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.ConditionalWriter;
import org.apache.accumulo.core.client.ConditionalWriterConfig;
import org.apache.accumulo.core.client.Durability;
import org.apache.accumulo.core.client.TimedOutException;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletServerMutations;
import org.apache.accumulo.core.clientImpl.thrift.TInfo;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Condition;
import org.apache.accumulo.core.data.ConditionalMutation;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TCMResult;
import org.apache.accumulo.core.dataImpl.thrift.TCMStatus;
import org.apache.accumulo.core.dataImpl.thrift.TCondition;
import org.apache.accumulo.core.dataImpl.thrift.TConditionalMutation;
import org.apache.accumulo.core.dataImpl.thrift.TConditionalSession;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TMutation;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil.LockID;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.VisibilityEvaluator;
import org.apache.accumulo.core.security.VisibilityParseException;
import org.apache.accumulo.core.tabletingest.thrift.TabletIngestClientService;
import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.BadArgumentException;
import org.apache.accumulo.core.util.ByteBufferUtil;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.accumulo.core.util.threads.Threads;
import org.apache.commons.collections4.map.LRUMap;
import org.apache.commons.lang3.mutable.MutableLong;
import org.apache.hadoop.io.Text;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.transport.TTransportException;
import com.google.common.net.HostAndPort;
/**
 * Client-side implementation of {@link ConditionalWriter}. Mutations passed to {@link #write} are
 * wrapped as {@link QCMutation}s, binned to tablet servers, and sent by tasks on a shared thread
 * pool. Each mutation's outcome is delivered to the caller through a per-call result queue.
 * Mutations whose outcome was IGNORED by the server, or that could not be binned, are re-queued
 * with exponential backoff (via a {@link DelayQueue}) until they resolve or exceed the configured
 * timeout. Conditional-update sessions with tablet servers are cached and reused to avoid
 * per-batch session-creation RPCs.
 */
class ConditionalWriterImpl implements ConditionalWriter {
  // maximum backoff, in milliseconds, between retries of a failed mutation
  private static final int MAX_SLEEP = 30000;
  private Authorizations auths;
  // evaluates column visibilities against the user's authorizations
  private VisibilityEvaluator ve;
  // LRU cache of visibility-expression evaluation results, shared across writes
  private Map<Text,Boolean> cache = Collections.synchronizedMap(new LRUMap<>(1000));
  private final ClientContext context;
  private TabletLocator locator;
  private final TableId tableId;
  private final String tableName;
  // per-mutation timeout in milliseconds; Long.MAX_VALUE means retry forever
  private long timeout;
  private final Durability durability;
  private final String classLoaderContext;

  // mutations destined for a single tablet server, plus a flag ensuring at most one SendTask
  // per server is queued at a time
  private static class ServerQueue {
    BlockingQueue<TabletServerMutations<QCMutation>> queue = new LinkedBlockingQueue<>();
    boolean taskQueued = false;
  }

  // keyed by tablet server location string
  private Map<String,ServerQueue> serverQueues;
  // mutations waiting to retry; elements become available only after their backoff delay
  private DelayQueue<QCMutation> failedMutations = new DelayQueue<>();
  private ScheduledThreadPoolExecutor threadPool;
  // periodic task that drains failedMutations and re-queues them
  private final ScheduledFuture<?> failureTaskFuture;

  /**
   * Iterator handed back to callers of write(); pulls exactly {@code count} results from the
   * result queue, polling so it can notice when the writer has been closed.
   */
  private class RQIterator implements Iterator<Result> {
    private BlockingQueue<Result> rq;
    private int count;

    public RQIterator(BlockingQueue<Result> resultQueue, int count) {
      this.rq = resultQueue;
      this.count = count;
    }

    @Override
    public boolean hasNext() {
      return count > 0;
    }

    @Override
    public Result next() {
      if (count <= 0) {
        throw new NoSuchElementException();
      }
      try {
        // poll with a short timeout so a close() of the writer is noticed promptly
        Result result = rq.poll(1, SECONDS);
        while (result == null) {
          if (threadPool.isShutdown()) {
            throw new NoSuchElementException("ConditionalWriter closed");
          }
          result = rq.poll(1, SECONDS);
        }
        count--;
        return result;
      } catch (InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * A queued conditional mutation. Implements Delayed so it can sit in the failure DelayQueue;
   * the delay doubles on each retry up to MAX_SLEEP. Carries the queue its Result is posted to
   * and the time it entered the writer (for timeout accounting).
   */
  private static class QCMutation extends ConditionalMutation implements Delayed {
    private BlockingQueue<Result> resultQueue;
    // when the current backoff started
    private long resetTime;
    // current backoff, in milliseconds
    private long delay = 50;
    // when the mutation was handed to write(); used against the configured timeout
    private long entryTime;

    QCMutation(ConditionalMutation cm, BlockingQueue<Result> resultQueue, long entryTime) {
      super(cm);
      this.resultQueue = resultQueue;
      this.entryTime = entryTime;
    }

    @Override
    public int compareTo(Delayed o) {
      QCMutation oqcm = (QCMutation) o;
      return Long.compare(resetTime, oqcm.resetTime);
    }

    @Override
    public int hashCode() {
      return super.hashCode();
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof QCMutation) {
        // NOTE(review): equality is based on compareTo, i.e. resetTime only — presumably
        // sufficient for DelayQueue usage; verify if used in hash-based collections
        return compareTo((QCMutation) o) == 0;
      }
      return false;
    }

    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(delay - (System.currentTimeMillis() - resetTime), MILLISECONDS);
    }

    /** Doubles the backoff (capped at MAX_SLEEP) and restarts the delay clock. */
    void resetDelay() {
      delay = Math.min(delay * 2, MAX_SLEEP);
      resetTime = System.currentTimeMillis();
    }

    /** Delivers this mutation's final status to the caller's result queue. */
    void queueResult(Result result) {
      resultQueue.add(result);
    }
  }

  /** Gets (creating on demand) the queue for a tablet server location. */
  private ServerQueue getServerQueue(String location) {
    ServerQueue serverQueue;
    synchronized (serverQueues) {
      serverQueue = serverQueues.get(location);
      if (serverQueue == null) {
        serverQueue = new ServerQueue();
        serverQueues.put(location, serverQueue);
      }
    }
    return serverQueue;
  }

  /**
   * Background task run on close(): best-effort close of any still-active cached sessions on
   * their tablet servers.
   */
  private class CleanupTask implements Runnable {
    private List<SessionID> sessions;

    CleanupTask(List<SessionID> activeSessions) {
      this.sessions = activeSessions;
    }

    @Override
    public void run() {
      TabletIngestClientService.Iface client = null;
      for (SessionID sid : sessions) {
        if (!sid.isActive()) {
          continue;
        }
        TInfo tinfo = TraceUtil.traceInfo();
        try {
          client = getClient(sid.location);
          client.closeConditionalUpdate(tinfo, sid.sessionID);
          // NOTE(review): exceptions are deliberately ignored (best-effort cleanup). If
          // getClient throws, `client` still references the previous iteration's (already
          // returned) client and is returned again in finally — presumably returnClient
          // tolerates this; verify.
        } catch (Exception e) {} finally {
          ThriftUtil.returnClient((TServiceClient) client, context);
        }
      }
    }
  }

  /**
   * Re-queues mutations for retry after backoff, or reports a TimedOutException for any whose
   * next attempt would exceed the configured timeout.
   */
  private void queueRetry(List<QCMutation> mutations, HostAndPort server) {
    if (timeout < Long.MAX_VALUE) {
      long time = System.currentTimeMillis();
      ArrayList<QCMutation> mutations2 = new ArrayList<>(mutations.size());
      for (QCMutation qcm : mutations) {
        qcm.resetDelay();
        // would the next retry happen after the deadline?
        if (time + qcm.getDelay(MILLISECONDS) > qcm.entryTime + timeout) {
          TimedOutException toe;
          if (server != null) {
            toe = new TimedOutException(Collections.singleton(server.toString()));
          } else {
            toe = new TimedOutException("Conditional mutation timed out");
          }
          qcm.queueResult(new Result(toe, qcm, (server == null ? null : server.toString())));
        } else {
          mutations2.add(qcm);
        }
      }
      if (!mutations2.isEmpty()) {
        failedMutations.addAll(mutations2);
      }
    } else {
      // no timeout configured: always retry
      mutations.forEach(QCMutation::resetDelay);
      failedMutations.addAll(mutations);
    }
  }

  /**
   * Bins mutations to tablet servers and queues them for sending. Mutations that cannot be
   * binned are retried; if binning throws, every mutation gets the exception as its result.
   */
  private void queue(List<QCMutation> mutations) {
    List<QCMutation> failures = new ArrayList<>();
    Map<String,TabletServerMutations<QCMutation>> binnedMutations = new HashMap<>();
    try {
      locator.binMutations(context, mutations, binnedMutations, failures);
      // if nothing binned, surface a clearer error when the table is gone or offline
      if (failures.size() == mutations.size()) {
        context.requireNotDeleted(tableId);
        context.requireNotOffline(tableId, tableName);
      }
    } catch (Exception e) {
      mutations.forEach(qcm -> qcm.queueResult(new Result(e, qcm, null)));
      // do not want to queue anything that was put in before binMutations() failed
      failures.clear();
      binnedMutations.clear();
    }
    if (!failures.isEmpty()) {
      queueRetry(failures, null);
    }
    binnedMutations.forEach(this::queue);
  }

  /** Adds mutations to a server's queue, starting a SendTask for it if none is running. */
  private void queue(String location, TabletServerMutations<QCMutation> mutations) {
    ServerQueue serverQueue = getServerQueue(location);
    synchronized (serverQueue) {
      serverQueue.queue.add(mutations);
      // never execute more than one task per server
      if (!serverQueue.taskQueued) {
        threadPool.execute(new SendTask(location));
        serverQueue.taskQueued = true;
      }
    }
  }

  private void reschedule(SendTask task) {
    ServerQueue serverQueue = getServerQueue(task.location);
    // just finished processing work for this server, could reschedule if it has more work or
    // immediately process the work
    // this code reschedules the the server for processing later... there may be other queues with
    // more data that need to be processed... also it will give the current server time to build
    // up more data... the thinking is that rescheduling instead or processing immediately will
    // result in bigger batches and less RPC overhead
    synchronized (serverQueue) {
      if (serverQueue.queue.isEmpty()) {
        serverQueue.taskQueued = false;
      } else {
        threadPool.execute(task);
      }
    }
  }

  /**
   * Drains everything queued for a server, merging multiple pending requests into one
   * TabletServerMutations so they are sent in a single RPC. Returns null when nothing is queued.
   */
  private TabletServerMutations<QCMutation> dequeue(String location) {
    var queue = getServerQueue(location).queue;
    var mutations = new ArrayList<TabletServerMutations<QCMutation>>();
    queue.drainTo(mutations);
    if (mutations.isEmpty()) {
      return null;
    }
    if (mutations.size() == 1) {
      return mutations.get(0);
    } else {
      // merge multiple request to a single tablet server
      TabletServerMutations<QCMutation> tsm = mutations.get(0);
      for (int i = 1; i < mutations.size(); i++) {
        mutations.get(i).getMutations().forEach((keyExtent, mutationList) -> tsm.getMutations()
            .computeIfAbsent(keyExtent, k -> new ArrayList<>()).addAll(mutationList));
      }
      return tsm;
    }
  }

  ConditionalWriterImpl(ClientContext context, TableId tableId, String tableName,
      ConditionalWriterConfig config) {
    this.context = context;
    this.auths = config.getAuthorizations();
    this.ve = new VisibilityEvaluator(config.getAuthorizations());
    this.threadPool = context.threadPools().createScheduledExecutorService(
        config.getMaxWriteThreads(), this.getClass().getSimpleName(), false);
    this.locator = new SyncingTabletLocator(context, tableId);
    this.serverQueues = new HashMap<>();
    this.tableId = tableId;
    this.tableName = tableName;
    this.timeout = config.getTimeout(MILLISECONDS);
    this.durability = config.getDurability();
    this.classLoaderContext = config.getClassLoaderContext();
    // periodically drain mutations whose backoff has elapsed and re-queue them
    Runnable failureHandler = () -> {
      List<QCMutation> mutations = new ArrayList<>();
      failedMutations.drainTo(mutations);
      if (!mutations.isEmpty()) {
        queue(mutations);
      }
    };
    failureTaskFuture = threadPool.scheduleAtFixedRate(failureHandler, 250, 250, MILLISECONDS);
  }

  @Override
  public Iterator<Result> write(Iterator<ConditionalMutation> mutations) {
    ThreadPools.ensureRunning(failureTaskFuture,
        "Background task that re-queues failed mutations has exited.");
    BlockingQueue<Result> resultQueue = new LinkedBlockingQueue<>();
    List<QCMutation> mutationList = new ArrayList<>();
    int count = 0;
    long entryTime = System.currentTimeMillis();
    mloop: while (mutations.hasNext()) {
      ConditionalMutation mut = mutations.next();
      count++;
      if (mut.getConditions().isEmpty()) {
        throw new IllegalArgumentException(
            "ConditionalMutation had no conditions " + new String(mut.getRow(), UTF_8));
      }
      // reject client-side any mutation with a condition visibility the user cannot see
      for (Condition cond : mut.getConditions()) {
        if (!isVisible(cond.getVisibility())) {
          resultQueue.add(new Result(Status.INVISIBLE_VISIBILITY, mut, null));
          continue mloop;
        }
      }
      // copy the mutations so that even if caller changes it, it will not matter
      mutationList.add(new QCMutation(mut, resultQueue, entryTime));
    }
    queue(mutationList);
    return new RQIterator(resultQueue, count);
  }

  /** Drains and sends queued mutations for one tablet server, then reschedules itself. */
  private class SendTask implements Runnable {
    String location;

    public SendTask(String location) {
      this.location = location;
    }

    @Override
    public void run() {
      try {
        TabletServerMutations<QCMutation> mutations = dequeue(location);
        if (mutations != null) {
          sendToServer(HostAndPort.fromString(location), mutations);
        }
      } finally {
        reschedule(this);
      }
    }
  }

  // pairs a mutation with the extent it was sent to, keyed by the per-RPC mutation id
  private static class CMK {
    QCMutation cm;
    KeyExtent ke;

    public CMK(KeyExtent ke, QCMutation cm) {
      this.ke = ke;
      this.cm = cm;
    }
  }

  /** A cached conditional-update session on a tablet server. */
  private static class SessionID {
    HostAndPort location;
    String lockId;
    long sessionID;
    // true while a SendTask is using this session
    boolean reserved;
    long lastAccessTime;
    // server-side session time-to-live, in milliseconds
    long ttl;

    boolean isActive() {
      // use only 95% of the TTL to leave a safety margin before server-side expiry
      return System.currentTimeMillis() - lastAccessTime < ttl * .95;
    }
  }

  private HashMap<HostAndPort,SessionID> cachedSessionIDs = new HashMap<>();

  /**
   * Reserves the cached session for a server, creating a new one via RPC when none exists or
   * the cached one has gone stale.
   */
  private SessionID reserveSessionID(HostAndPort location, TabletIngestClientService.Iface client,
      TInfo tinfo) throws ThriftSecurityException, TException {
    // avoid cost of repeatedly making RPC to create sessions, reuse sessions
    synchronized (cachedSessionIDs) {
      SessionID sid = cachedSessionIDs.get(location);
      if (sid != null) {
        if (sid.reserved) {
          throw new IllegalStateException();
        }
        if (sid.isActive()) {
          sid.reserved = true;
          return sid;
        } else {
          cachedSessionIDs.remove(location);
        }
      }
    }
    // RPC done outside the lock
    TConditionalSession tcs = client.startConditionalUpdate(tinfo, context.rpcCreds(),
        ByteBufferUtil.toByteBuffers(auths.getAuthorizations()), tableId.canonical(),
        DurabilityImpl.toThrift(durability), this.classLoaderContext);
    synchronized (cachedSessionIDs) {
      SessionID sid = new SessionID();
      sid.reserved = true;
      sid.sessionID = tcs.sessionId;
      sid.lockId = tcs.tserverLock;
      sid.ttl = tcs.ttl;
      sid.location = location;
      if (cachedSessionIDs.put(location, sid) != null) {
        throw new IllegalStateException();
      }
      return sid;
    }
  }

  private void invalidateSessionID(HostAndPort location) {
    synchronized (cachedSessionIDs) {
      cachedSessionIDs.remove(location);
    }
  }

  /** Releases a reserved session back to the cache, refreshing its last-access time. */
  private void unreserveSessionID(HostAndPort location) {
    synchronized (cachedSessionIDs) {
      SessionID sid = cachedSessionIDs.get(location);
      if (sid != null) {
        if (!sid.reserved) {
          throw new IllegalStateException();
        }
        sid.reserved = false;
        sid.lastAccessTime = System.currentTimeMillis();
      }
    }
  }

  List<SessionID> getActiveSessions() {
    ArrayList<SessionID> activeSessions = new ArrayList<>();
    for (SessionID sid : cachedSessionIDs.values()) {
      if (sid.isActive()) {
        activeSessions.add(sid);
      }
    }
    return activeSessions;
  }

  /** Gets a thrift client, honoring the writer's timeout when shorter than the client default. */
  private TabletIngestClientService.Iface getClient(HostAndPort location)
      throws TTransportException {
    TabletIngestClientService.Iface client;
    if (timeout < context.getClientTimeoutInMillis()) {
      client = ThriftUtil.getClient(ThriftClientTypes.TABLET_INGEST, location, context, timeout);
    } else {
      client = ThriftUtil.getClient(ThriftClientTypes.TABLET_INGEST, location, context);
    }
    return client;
  }

  /**
   * Sends one merged batch of conditional mutations to a tablet server and routes each result:
   * ACCEPTED/REJECTED/VIOLATED go straight to the caller, IGNORED mutations are retried (with
   * their tablet-location cache entries invalidated), and transport failures are resolved via
   * invalidateSession so a mutation is never left in a silently-unknown state.
   */
  private void sendToServer(HostAndPort location, TabletServerMutations<QCMutation> mutations) {
    TabletIngestClientService.Iface client = null;
    TInfo tinfo = TraceUtil.traceInfo();
    Map<Long,CMK> cmidToCm = new HashMap<>();
    MutableLong cmid = new MutableLong(0);
    SessionID sessionId = null;
    try {
      Map<TKeyExtent,List<TConditionalMutation>> tmutations = new HashMap<>();
      CompressedIterators compressedIters = new CompressedIterators();
      convertMutations(mutations, cmidToCm, cmid, tmutations, compressedIters);
      // getClient() call must come after converMutations in case it throws a TException
      client = getClient(location);
      List<TCMResult> tresults = null;
      while (tresults == null) {
        try {
          sessionId = reserveSessionID(location, client, tinfo);
          tresults = client.conditionalUpdate(tinfo, sessionId.sessionID, tmutations,
              compressedIters.getSymbolTable());
        } catch (NoSuchScanIDException nssie) {
          // cached session expired server side; drop it and create a fresh one
          sessionId = null;
          invalidateSessionID(location);
        }
      }
      HashSet<KeyExtent> extentsToInvalidate = new HashSet<>();
      ArrayList<QCMutation> ignored = new ArrayList<>();
      for (TCMResult tcmResult : tresults) {
        if (tcmResult.status == TCMStatus.IGNORED) {
          CMK cmk = cmidToCm.get(tcmResult.cmid);
          ignored.add(cmk.cm);
          extentsToInvalidate.add(cmk.ke);
        } else {
          QCMutation qcm = cmidToCm.get(tcmResult.cmid).cm;
          qcm.queueResult(new Result(fromThrift(tcmResult.status), qcm, location.toString()));
        }
      }
      for (KeyExtent ke : extentsToInvalidate) {
        locator.invalidateCache(ke);
      }
      queueRetry(ignored, location);
    } catch (ThriftSecurityException tse) {
      AccumuloSecurityException ase =
          new AccumuloSecurityException(context.getCredentials().getPrincipal(), tse.getCode(),
              context.getPrintableTableInfoFromId(tableId), tse);
      queueException(location, cmidToCm, ase);
    } catch (TApplicationException tae) {
      queueException(location, cmidToCm, new AccumuloServerException(location.toString(), tae));
    } catch (TException e) {
      locator.invalidateCache(context, location.toString());
      invalidateSession(location, cmidToCm, sessionId);
    } catch (Exception e) {
      queueException(location, cmidToCm, e);
    } finally {
      if (sessionId != null) {
        unreserveSessionID(location);
      }
      ThriftUtil.returnClient((TServiceClient) client, context);
    }
  }

  /** Re-queues all mutations in the id map for retry. */
  private void queueRetry(Map<Long,CMK> cmidToCm, HostAndPort location) {
    ArrayList<QCMutation> ignored = new ArrayList<>();
    for (CMK cmk : cmidToCm.values()) {
      ignored.add(cmk.cm);
    }
    queueRetry(ignored, location);
  }

  /** Reports the given exception as the final result of every mutation in the id map. */
  private void queueException(HostAndPort location, Map<Long,CMK> cmidToCm, Exception e) {
    for (CMK cmk : cmidToCm.values()) {
      cmk.cm.queueResult(new Result(e, cmk.cm, location.toString()));
    }
  }

  /**
   * After a transport failure: if no session was ever established the mutations can be safely
   * retried; otherwise the session must be invalidated server side before reporting UNKNOWN.
   */
  private void invalidateSession(HostAndPort location, Map<Long,CMK> cmidToCm,
      SessionID sessionId) {
    if (sessionId == null) {
      queueRetry(cmidToCm, location);
    } else {
      try {
        invalidateSession(sessionId, location);
        for (CMK cmk : cmidToCm.values()) {
          cmk.cm.queueResult(new Result(Status.UNKNOWN, cmk.cm, location.toString()));
        }
      } catch (Exception e2) {
        queueException(location, cmidToCm, e2);
      }
    }
  }

  /**
   * The purpose of this code is to ensure that a conditional mutation will not execute when its
   * status is unknown. This allows a user to read the row when the status is unknown and not have
   * to worry about the tserver applying the mutation after the scan.
   *
   * <p>
   * If a conditional mutation is taking a long time to process, then this method will wait for it
   * to finish... unless this exceeds timeout.
   */
  private void invalidateSession(SessionID sessionId, HostAndPort location)
      throws AccumuloException {
    long sleepTime = 50;
    long startTime = System.currentTimeMillis();
    LockID lid = new LockID(context.getZooKeeperRoot() + Constants.ZTSERVERS, sessionId.lockId);
    while (true) {
      if (!ServiceLock.isLockHeld(context.getZooCache(), lid)) {
        // ACCUMULO-1152 added a tserver lock check to the tablet location cache, so this
        // invalidation prevents future attempts to contact the
        // tserver even its gone zombie and is still running w/o a lock
        locator.invalidateCache(context, location.toString());
        return;
      }
      try {
        // if the mutation is currently processing, this method will block until its done or times
        // out
        invalidateSession(sessionId.sessionID, location);
        return;
      } catch (TApplicationException tae) {
        throw new AccumuloServerException(location.toString(), tae);
      } catch (TException e) {
        locator.invalidateCache(context, location.toString());
      }
      if ((System.currentTimeMillis() - startTime) + sleepTime > timeout) {
        throw new TimedOutException(Collections.singleton(location.toString()));
      }
      sleepUninterruptibly(sleepTime, MILLISECONDS);
      sleepTime = Math.min(2 * sleepTime, MAX_SLEEP);
    }
  }

  /** RPC to invalidate one session on a tablet server. */
  private void invalidateSession(long sessionId, HostAndPort location) throws TException {
    TabletIngestClientService.Iface client = null;
    TInfo tinfo = TraceUtil.traceInfo();
    try {
      client = getClient(location);
      client.invalidateConditionalUpdate(tinfo, sessionId);
    } finally {
      ThriftUtil.returnClient((TServiceClient) client, context);
    }
  }

  /** Maps a thrift status onto the public Status enum; IGNORED is handled before this point. */
  private Status fromThrift(TCMStatus status) {
    switch (status) {
      case ACCEPTED:
        return Status.ACCEPTED;
      case REJECTED:
        return Status.REJECTED;
      case VIOLATED:
        return Status.VIOLATED;
      default:
        throw new IllegalArgumentException(status.toString());
    }
  }

  /**
   * Converts mutations into their thrift forms, assigning each a unique id (cmid) so results can
   * be matched back to the originating QCMutation via cmidToCm.
   */
  private void convertMutations(TabletServerMutations<QCMutation> mutations, Map<Long,CMK> cmidToCm,
      MutableLong cmid, Map<TKeyExtent,List<TConditionalMutation>> tmutations,
      CompressedIterators compressedIters) {
    mutations.getMutations().forEach((keyExtent, mutationList) -> {
      var tcondMutaions = new ArrayList<TConditionalMutation>();
      for (var cm : mutationList) {
        TMutation tm = cm.toThrift();
        List<TCondition> conditions = convertConditions(cm, compressedIters);
        cmidToCm.put(cmid.longValue(), new CMK(keyExtent, cm));
        TConditionalMutation tcm = new TConditionalMutation(conditions, tm, cmid.longValue());
        cmid.increment();
        tcondMutaions.add(tcm);
      }
      tmutations.put(keyExtent.toThrift(), tcondMutaions);
    });
  }

  // null timestamps sort first, then newest to oldest
  private static final Comparator<Long> TIMESTAMP_COMPARATOR =
      Comparator.nullsFirst(Comparator.reverseOrder());
  static final Comparator<Condition> CONDITION_COMPARATOR =
      Comparator.comparing(Condition::getFamily).thenComparing(Condition::getQualifier)
          .thenComparing(Condition::getVisibility)
          .thenComparing(Condition::getTimestamp, TIMESTAMP_COMPARATOR);

  /** Converts and sorts a mutation's conditions into their thrift form. */
  private List<TCondition> convertConditions(ConditionalMutation cm,
      CompressedIterators compressedIters) {
    List<TCondition> conditions = new ArrayList<>(cm.getConditions().size());
    // sort conditions inorder to get better lookup performance. Sort on client side so tserver does
    // not have to do it.
    Condition[] ca = cm.getConditions().toArray(new Condition[cm.getConditions().size()]);
    Arrays.sort(ca, CONDITION_COMPARATOR);
    for (Condition cond : ca) {
      long ts = 0;
      boolean hasTs = false;
      if (cond.getTimestamp() != null) {
        ts = cond.getTimestamp();
        hasTs = true;
      }
      ByteBuffer iters = compressedIters.compress(cond.getIterators());
      TCondition tc = new TCondition(ByteBufferUtil.toByteBuffers(cond.getFamily()),
          ByteBufferUtil.toByteBuffers(cond.getQualifier()),
          ByteBufferUtil.toByteBuffers(cond.getVisibility()), ts, hasTs,
          ByteBufferUtil.toByteBuffers(cond.getValue()), iters);
      conditions.add(tc);
    }
    return conditions;
  }

  /**
   * Returns whether the user's authorizations allow seeing the given visibility. Results are
   * cached; unparseable expressions evaluate to not-visible.
   */
  private boolean isVisible(ByteSequence cv) {
    Text testVis = new Text(cv.toArray());
    if (testVis.getLength() == 0) {
      return true;
    }
    Boolean b = cache.get(testVis);
    if (b != null) {
      return b;
    }
    try {
      boolean bb = ve.evaluate(new ColumnVisibility(testVis));
      // copy the Text before caching since testVis is mutable
      cache.put(new Text(testVis), bb);
      return bb;
    } catch (VisibilityParseException | BadArgumentException e) {
      return false;
    }
  }

  @Override
  public Result write(ConditionalMutation mutation) {
    return write(Collections.singleton(mutation).iterator()).next();
  }

  @Override
  public void close() {
    threadPool.shutdownNow();
    // close cached sessions in the background; see CleanupTask
    context.executeCleanupTask(Threads.createNamedRunnable("ConditionalWriterCleanupTask",
        new CleanupTask(getActiveSessions())));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.Objects.requireNonNull;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.securityImpl.thrift.TAuthenticationTokenIdentifier;
import org.apache.accumulo.core.util.ByteBufferUtil;
import org.apache.accumulo.core.util.ThriftMessageUtil;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
/**
* Implementation that identifies the underlying {@link Token} for Accumulo.
*/
public class AuthenticationTokenIdentifier extends TokenIdentifier {
public static final Text TOKEN_KIND = new Text("ACCUMULO_AUTH_TOKEN");
private final TAuthenticationTokenIdentifier impl;
  /** Creates an empty identifier; fields are unset until {@link #readFields} is called. */
  public AuthenticationTokenIdentifier() {
    impl = new TAuthenticationTokenIdentifier();
    populateFields(impl);
  }

  /**
   * Creates an identifier backed by a defensive copy of the given thrift identifier.
   *
   * @param identifier the thrift identifier to copy, must be non-null
   */
  public AuthenticationTokenIdentifier(TAuthenticationTokenIdentifier identifier) {
    requireNonNull(identifier);
    impl = new TAuthenticationTokenIdentifier(identifier);
    populateFields(identifier);
  }
  /** Sets the id of the key used with this token. */
  public void setKeyId(int keyId) {
    impl.setKeyId(keyId);
  }

  public int getKeyId() {
    return impl.getKeyId();
  }

  /** Sets the issue date, in milliseconds. */
  public void setIssueDate(long issueDate) {
    impl.setIssueDate(issueDate);
  }

  public long getIssueDate() {
    return impl.getIssueDate();
  }

  /** Sets the expiration date, in milliseconds. */
  public void setExpirationDate(long expirationDate) {
    impl.setExpirationDate(expirationDate);
  }

  public long getExpirationDate() {
    return impl.getExpirationDate();
  }

  public void setInstanceId(InstanceId instanceId) {
    impl.setInstanceId(instanceId.canonical());
  }

  /**
   * @return the instance id this token applies to; an empty instance id when none is set (never
   *         null)
   */
  public InstanceId getInstanceId() {
    if (impl.getInstanceId() == null) {
      return InstanceId.of("");
    } else {
      return InstanceId.of(impl.getInstanceId());
    }
  }

  /** @return the underlying thrift identifier backing this object (not a copy) */
  public TAuthenticationTokenIdentifier getThriftIdentifier() {
    return impl;
  }
@Override
public void write(DataOutput out) throws IOException {
ThriftMessageUtil msgUtil = new ThriftMessageUtil();
ByteBuffer serialized = msgUtil.serialize(impl);
out.writeInt(serialized.limit());
ByteBufferUtil.write(out, serialized);
}
@Override
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
if (length > 0) {
ThriftMessageUtil msgUtil = new ThriftMessageUtil();
byte[] serialized = new byte[length];
in.readFully(serialized);
var tAuthTokenId = msgUtil.deserialize(serialized, new TAuthenticationTokenIdentifier());
populateFields(tAuthTokenId);
}
}
private void populateFields(TAuthenticationTokenIdentifier tAuthTokenId) {
impl.principal = tAuthTokenId.getPrincipal();
setExpirationDate(tAuthTokenId.getExpirationDate());
setIssueDate(tAuthTokenId.getIssueDate());
if (tAuthTokenId.getInstanceId() != null) {
setInstanceId(InstanceId.of(tAuthTokenId.getInstanceId()));
}
setKeyId(tAuthTokenId.getKeyId());
}
@Override
public Text getKind() {
return TOKEN_KIND;
}
@Override
public UserGroupInformation getUser() {
if (impl.isSetPrincipal()) {
return UserGroupInformation.createRemoteUser(impl.getPrincipal());
}
return null;
}
@Override
public int hashCode() {
HashCodeBuilder hcb = new HashCodeBuilder(7, 11);
if (impl.isSetPrincipal()) {
hcb.append(impl.getPrincipal());
}
if (impl.isSetKeyId()) {
hcb.append(impl.getKeyId());
}
if (impl.isSetIssueDate()) {
hcb.append(impl.getIssueDate());
}
if (impl.isSetExpirationDate()) {
hcb.append(impl.getExpirationDate());
}
if (impl.isSetInstanceId()) {
hcb.append(impl.getInstanceId());
}
return hcb.toHashCode();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(128);
sb.append("AuthenticationTokenIdentifier(").append(impl).append(")");
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (o instanceof AuthenticationTokenIdentifier) {
AuthenticationTokenIdentifier other = (AuthenticationTokenIdentifier) o;
return impl.equals(other.impl);
}
return false;
}
public static TAuthenticationTokenIdentifier createTAuthIdentifier(String principal, int keyId,
long issueDate, long expirationDate, String instanceId) {
TAuthenticationTokenIdentifier tIdentifier = new TAuthenticationTokenIdentifier(principal);
tIdentifier.setKeyId(keyId);
tIdentifier.setIssueDate(issueDate);
tIdentifier.setExpirationDate(expirationDate);
tIdentifier.setInstanceId(instanceId);
return tIdentifier;
}
public boolean isSetIssueDate() {
return impl.isSetIssueDate();
}
public boolean isSetExpirationDate() {
return impl.isSetExpirationDate();
}
}
| 9,827 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/AccumuloServerException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.thrift.TApplicationException;
/**
 * This class is intended to encapsulate errors that occurred on the server side.
 */
public class AccumuloServerException extends AccumuloException {
  private static final long serialVersionUID = 1L;

  // hostname/port string of the server where the error originated
  private String server;

  /**
   * Wraps another server-side exception while preserving which server it came from.
   */
  AccumuloServerException(final AccumuloServerException cause) {
    super("Error on server " + cause.getServer(), cause);
    // Bug fix: propagate the originating server so getServer() on the wrapper
    // does not return null while the message still names the server.
    this.setServer(cause.getServer());
  }

  /**
   * @param server the server on which the error occurred
   * @param tae the application-level Thrift exception reported by that server
   */
  public AccumuloServerException(final String server, final TApplicationException tae) {
    super("Error on server " + server, tae);
    this.setServer(server);
  }

  private void setServer(final String server) {
    this.server = server;
  }

  /** @return the server on which the error occurred */
  public String getServer() {
    return server;
  }
}
| 9,828 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ThriftScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.stream.Collectors;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletLocation;
import org.apache.accumulo.core.clientImpl.thrift.TInfo;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyValue;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.dataImpl.thrift.InitialScan;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.dataImpl.thrift.ScanResult;
import org.apache.accumulo.core.dataImpl.thrift.TKeyValue;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.spi.scan.ScanServerAttempt;
import org.apache.accumulo.core.spi.scan.ScanServerSelections;
import org.apache.accumulo.core.spi.scan.ScanServerSelector;
import org.apache.accumulo.core.tabletscan.thrift.ScanServerBusyException;
import org.apache.accumulo.core.tabletscan.thrift.TSampleNotPresentException;
import org.apache.accumulo.core.tabletscan.thrift.TabletScanClientService;
import org.apache.accumulo.core.tabletscan.thrift.TooManyFilesException;
import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.hadoop.io.Text;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
/**
 * Client-side driver for scan RPCs. Locates the tablet holding the scan's current position,
 * starts/continues scan sessions on tablet servers (or on scan servers chosen by the configured
 * {@link ScanServerSelector}), and retries transient failures with bounded exponential backoff.
 */
public class ThriftScanner {
  private static final Logger log = LoggerFactory.getLogger(ThriftScanner.class);
  // This set is initially empty when the client starts. The first time this
  // client contacts a server it will wait for any writes that are in progress.
  // This is to account for the case where a client may have sent writes
  // to accumulo and dies while waiting for a confirmation from
  // accumulo. The client process restarts and tries to read
  // data from accumulo making the assumption that it will get
  // any writes previously made, however if the server side thread
  // processing the write from the dead client is still in progress,
  // the restarted client may not see the write unless we wait here.
  // this behavior is very important when the client is reading the
  // metadata
  public static final Map<TabletType,Set<String>> serversWaitedForWrites =
      new EnumMap<>(TabletType.class);
  static {
    for (TabletType ttype : TabletType.values()) {
      serversWaitedForWrites.put(ttype, Collections.synchronizedSet(new HashSet<>()));
    }
  }

  /**
   * Performs a single, self-contained scan RPC against the given server, placing the returned
   * key/values into {@code results} and closing the server-side session before returning.
   *
   * @return true if the server reported more data remains for the range
   * @throws AccumuloException if {@code server} is null or the RPC cannot be completed
   * @throws AccumuloSecurityException if the server reports a security violation
   */
  public static boolean getBatchFromServer(ClientContext context, Range range, KeyExtent extent,
      String server, SortedMap<Key,Value> results, SortedSet<Column> fetchedColumns,
      List<IterInfo> serverSideIteratorList,
      Map<String,Map<String,String>> serverSideIteratorOptions, int size,
      Authorizations authorizations, long batchTimeOut, String classLoaderContext)
      throws AccumuloException, AccumuloSecurityException {
    if (server == null) {
      throw new AccumuloException(new IOException());
    }
    final HostAndPort parsedServer = HostAndPort.fromString(server);
    try {
      TInfo tinfo = TraceUtil.traceInfo();
      TabletScanClientService.Client client =
          ThriftUtil.getClient(ThriftClientTypes.TABLET_SCAN, parsedServer, context);
      try {
        // not reading whole rows (or stopping on row boundaries) so there is no need to enable
        // isolation below
        ScanState scanState = new ScanState(context, extent.tableId(), authorizations, range,
            fetchedColumns, size, serverSideIteratorList, serverSideIteratorOptions, false,
            Constants.SCANNER_DEFAULT_READAHEAD_THRESHOLD, null, batchTimeOut, classLoaderContext,
            null, false);
        TabletType ttype = TabletType.type(extent);
        // only wait for in-progress writes the first time this client talks to this server
        boolean waitForWrites = !serversWaitedForWrites.get(ttype).contains(server);
        InitialScan isr = client.startScan(tinfo, scanState.context.rpcCreds(), extent.toThrift(),
            scanState.range.toThrift(),
            scanState.columns.stream().map(Column::toThrift).collect(Collectors.toList()),
            scanState.size, scanState.serverSideIteratorList, scanState.serverSideIteratorOptions,
            scanState.authorizations.getAuthorizationsBB(), waitForWrites, scanState.isolated,
            scanState.readaheadThreshold, null, scanState.batchTimeOut, classLoaderContext,
            scanState.executionHints, 0L);
        if (waitForWrites) {
          serversWaitedForWrites.get(ttype).add(server);
        }
        Key.decompress(isr.result.results);
        for (TKeyValue kv : isr.result.results) {
          results.put(new Key(kv.key), new Value(kv.value));
        }
        client.closeScan(tinfo, isr.scanID);
        return isr.result.more;
      } finally {
        ThriftUtil.returnClient(client, context);
      }
    } catch (TApplicationException tae) {
      throw new AccumuloServerException(server, tae);
    } catch (TooManyFilesException e) {
      log.debug("Tablet ({}) has too many files {} : {}", extent, server, e.getMessage());
    } catch (ThriftSecurityException e) {
      log.warn("Security Violation in scan request to {}: {}", server, e.getMessage());
      throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (TException e) {
      log.debug("Error getting transport to {}: {}", server, e.getMessage());
    }
    throw new AccumuloException("getBatchFromServer: failed");
  }

  /**
   * Mutable state for an in-progress scan: the remaining range, current position, active
   * server-side session id, and the options the scan was started with.
   */
  public static class ScanState {
    boolean isolated;
    TableId tableId;
    // row the next tablet lookup starts from
    Text startRow;
    // when true, the tablet lookup skips startRow itself (used after finishing a tablet)
    boolean skipStartRow;
    long readaheadThreshold;
    long batchTimeOut;
    // when true, scans may be dispatched to scan servers via the scan server selector
    boolean runOnScanServer;
    Range range;
    int size;
    ClientContext context;
    Authorizations authorizations;
    List<Column> columns;
    // last server an RPC was sent to; used for error reporting and scan server reuse
    TabletLocation prevLoc;
    // server-side scan session id; null means a new session must be started
    Long scanID;
    String classLoaderContext;
    boolean finished = false;
    List<IterInfo> serverSideIteratorList;
    Map<String,Map<String,String>> serverSideIteratorOptions;
    SamplerConfiguration samplerConfig;
    Map<String,String> executionHints;
    ScanServerAttemptsImpl scanAttempts;
    Duration busyTimeout;
    TabletLocation getErrorLocation() {
      return prevLoc;
    }

    /**
     * @param useScanServer when true, scan server attempt tracking is initialized and scans may
     *        be routed to scan servers
     */
    public ScanState(ClientContext context, TableId tableId, Authorizations authorizations,
        Range range, SortedSet<Column> fetchedColumns, int size,
        List<IterInfo> serverSideIteratorList,
        Map<String,Map<String,String>> serverSideIteratorOptions, boolean isolated,
        long readaheadThreshold, SamplerConfiguration samplerConfig, long batchTimeOut,
        String classLoaderContext, Map<String,String> executionHints, boolean useScanServer) {
      this.context = context;
      this.authorizations = authorizations;
      this.classLoaderContext = classLoaderContext;
      columns = new ArrayList<>(fetchedColumns.size());
      for (Column column : fetchedColumns) {
        columns.add(column);
      }
      this.tableId = tableId;
      this.range = range;
      Key startKey = range.getStartKey();
      if (startKey == null) {
        startKey = new Key();
      }
      this.startRow = startKey.getRow();
      this.skipStartRow = false;
      this.size = size;
      this.serverSideIteratorList = serverSideIteratorList;
      this.serverSideIteratorOptions = serverSideIteratorOptions;
      this.isolated = isolated;
      this.readaheadThreshold = readaheadThreshold;
      this.samplerConfig = samplerConfig;
      this.batchTimeOut = batchTimeOut;
      if (executionHints == null || executionHints.isEmpty()) {
        this.executionHints = null; // avoid thrift serialization for empty map
      } else {
        this.executionHints = executionHints;
      }
      this.runOnScanServer = useScanServer;
      if (useScanServer) {
        scanAttempts = new ScanServerAttemptsImpl();
      }
    }
  }

  /** Thrown when a scan exceeds the caller-supplied overall timeout. */
  public static class ScanTimedOutException extends IOException {
    private static final long serialVersionUID = 1L;
  }

  /**
   * Sleeps for {@code millis} (unless the scan server plugin controls delays) and returns the next
   * backoff interval: double the current one, capped at {@code maxSleep}, with +/-10% jitter.
   */
  static long pause(long millis, long maxSleep, boolean runOnScanServer)
      throws InterruptedException {
    if (!runOnScanServer) {
      // the client side scan server plugin controls sleep time... this sleep is for regular scans
      // where the scan server plugin does not have control
      Thread.sleep(millis);
    }
    // wait 2 * last time, with +-10% random jitter
    return (long) (Math.min(millis * 2, maxSleep) * (.9 + RANDOM.get().nextDouble() / 5));
  }

  /**
   * Drives scan RPCs, locating tablets as needed and retrying transient failures, until a batch of
   * results is obtained or the scan finishes.
   *
   * @param timeOut maximum total elapsed time, in seconds, before {@link ScanTimedOutException}
   * @return the next batch of results, or null when the scan is finished
   */
  public static List<KeyValue> scan(ClientContext context, ScanState scanState, long timeOut)
      throws ScanTimedOutException, AccumuloException, AccumuloSecurityException,
      TableNotFoundException {
    TabletLocation loc = null;
    long startTime = System.currentTimeMillis();
    String lastError = null;
    String error = null;
    int tooManyFilesCount = 0;
    long sleepMillis = 100;
    final long maxSleepTime =
        context.getConfiguration().getTimeInMillis(Property.GENERAL_MAX_SCANNER_RETRY_PERIOD);
    List<KeyValue> results = null;
    Span parent = TraceUtil.startSpan(ThriftScanner.class, "scan");
    try (Scope scope = parent.makeCurrent()) {
      while (results == null && !scanState.finished) {
        if (Thread.currentThread().isInterrupted()) {
          throw new AccumuloException("Thread interrupted");
        }
        if ((System.currentTimeMillis() - startTime) / 1000.0 > timeOut) {
          throw new ScanTimedOutException();
        }
        // resolve the tablet for the scan's current position, retrying until found
        while (loc == null) {
          long currentTime = System.currentTimeMillis();
          if ((currentTime - startTime) / 1000.0 > timeOut) {
            throw new ScanTimedOutException();
          }
          Span child1 = TraceUtil.startSpan(ThriftScanner.class, "scan::locateTablet");
          try (Scope locateSpan = child1.makeCurrent()) {
            loc = TabletLocator.getLocator(context, scanState.tableId).locateTablet(context,
                scanState.startRow, scanState.skipStartRow, false);
            if (loc == null) {
              context.requireNotDeleted(scanState.tableId);
              context.requireNotOffline(scanState.tableId, null);
              error = "Failed to locate tablet for table : " + scanState.tableId + " row : "
                  + scanState.startRow;
              // log repeated identical errors at trace level only, to avoid log spam
              if (!error.equals(lastError)) {
                log.debug("{}", error);
              } else if (log.isTraceEnabled()) {
                log.trace("{}", error);
              }
              lastError = error;
              sleepMillis = pause(sleepMillis, maxSleepTime, scanState.runOnScanServer);
            } else {
              // when a tablet splits we do want to continue scanning the low child
              // of the split if we are already passed it
              Range dataRange = loc.getExtent().toDataRange();
              if (scanState.range.getStartKey() != null
                  && dataRange.afterEndKey(scanState.range.getStartKey())) {
                // go to the next tablet
                scanState.startRow = loc.getExtent().endRow();
                scanState.skipStartRow = true;
                loc = null;
              } else if (scanState.range.getEndKey() != null
                  && dataRange.beforeStartKey(scanState.range.getEndKey())) {
                // should not happen
                throw new IllegalStateException("Unexpected tablet, extent : " + loc.getExtent()
                    + " range : " + scanState.range + " startRow : " + scanState.startRow);
              }
            }
          } catch (AccumuloServerException e) {
            TraceUtil.setException(child1, e, true);
            log.debug("Scan failed, server side exception : {}", e.getMessage());
            throw e;
          } catch (AccumuloException e) {
            error = "exception from tablet loc " + e.getMessage();
            if (!error.equals(lastError)) {
              log.debug("{}", error);
            } else if (log.isTraceEnabled()) {
              log.trace("{}", error);
            }
            TraceUtil.setException(child1, e, false);
            lastError = error;
            sleepMillis = pause(sleepMillis, maxSleepTime, scanState.runOnScanServer);
          } finally {
            child1.end();
          }
        }
        Span child2 = TraceUtil.startSpan(ThriftScanner.class, "scan::location",
            Map.of("tserver", loc.getTserverLocation()));
        try (Scope scanLocation = child2.makeCurrent()) {
          results = scan(loc, scanState, context);
        } catch (AccumuloSecurityException e) {
          context.clearTableListCache();
          context.requireNotDeleted(scanState.tableId);
          e.setTableInfo(context.getPrintableTableInfoFromId(scanState.tableId));
          TraceUtil.setException(child2, e, true);
          throw e;
        } catch (TApplicationException tae) {
          TraceUtil.setException(child2, tae, true);
          throw new AccumuloServerException(scanState.getErrorLocation().getTserverLocation(), tae);
        } catch (TSampleNotPresentException tsnpe) {
          String message = "Table " + context.getPrintableTableInfoFromId(scanState.tableId)
              + " does not have sampling configured or built";
          TraceUtil.setException(child2, tsnpe, true);
          throw new SampleNotPresentException(message, tsnpe);
        } catch (NotServingTabletException e) {
          error = "Scan failed, not serving tablet " + scanState.getErrorLocation();
          if (!error.equals(lastError)) {
            log.debug("{}", error);
          } else if (log.isTraceEnabled()) {
            log.trace("{}", error);
          }
          lastError = error;
          // tablet may have moved; invalidate the cached location and look it up again
          TabletLocator.getLocator(context, scanState.tableId).invalidateCache(loc.getExtent());
          loc = null;
          // no need to try the current scan id somewhere else
          scanState.scanID = null;
          if (scanState.isolated) {
            TraceUtil.setException(child2, e, true);
            throw new IsolationException();
          }
          TraceUtil.setException(child2, e, false);
          sleepMillis = pause(sleepMillis, maxSleepTime, scanState.runOnScanServer);
        } catch (ScanServerBusyException e) {
          error = "Scan failed, scan server was busy " + scanState.getErrorLocation();
          if (!error.equals(lastError)) {
            log.debug("{}", error);
          } else if (log.isTraceEnabled()) {
            log.trace("{}", error);
          }
          lastError = error;
          if (scanState.isolated) {
            TraceUtil.setException(child2, e, true);
            throw new IsolationException();
          }
          TraceUtil.setException(child2, e, false);
          scanState.scanID = null;
        } catch (NoSuchScanIDException e) {
          error = "Scan failed, no such scan id " + scanState.scanID + " "
              + scanState.getErrorLocation();
          if (!error.equals(lastError)) {
            log.debug("{}", error);
          } else if (log.isTraceEnabled()) {
            log.trace("{}", error);
          }
          lastError = error;
          if (scanState.isolated) {
            TraceUtil.setException(child2, e, true);
            throw new IsolationException();
          }
          TraceUtil.setException(child2, e, false);
          scanState.scanID = null;
        } catch (TooManyFilesException e) {
          error = "Tablet has too many files " + scanState.getErrorLocation() + " retrying...";
          if (error.equals(lastError)) {
            tooManyFilesCount++;
            // escalate to warn once this has repeated many times in a row
            if (tooManyFilesCount == 300) {
              log.warn("{}", error);
            } else if (log.isTraceEnabled()) {
              log.trace("{}", error);
            }
          } else {
            log.debug("{}", error);
            tooManyFilesCount = 0;
          }
          lastError = error;
          // not sure what state the scan session on the server side is
          // in after this occurs, so lets be cautious and start a new
          // scan session
          scanState.scanID = null;
          if (scanState.isolated) {
            TraceUtil.setException(child2, e, true);
            throw new IsolationException();
          }
          TraceUtil.setException(child2, e, false);
          sleepMillis = pause(sleepMillis, maxSleepTime, scanState.runOnScanServer);
        } catch (TException e) {
          TabletLocator.getLocator(context, scanState.tableId).invalidateCache(context,
              loc.getTserverLocation());
          error = "Scan failed, thrift error " + e.getClass().getName() + " " + e.getMessage()
              + " " + scanState.getErrorLocation();
          if (!error.equals(lastError)) {
            log.debug("{}", error);
          } else if (log.isTraceEnabled()) {
            log.trace("{}", error);
          }
          lastError = error;
          loc = null;
          // do not want to continue using the same scan id, if a timeout occurred could cause a
          // batch to be skipped
          // because a thread on the server side may still be processing the timed out continue scan
          scanState.scanID = null;
          if (scanState.isolated) {
            TraceUtil.setException(child2, e, true);
            throw new IsolationException();
          }
          TraceUtil.setException(child2, e, false);
          sleepMillis = pause(sleepMillis, maxSleepTime, scanState.runOnScanServer);
        } finally {
          child2.end();
        }
      }
      // normalize "finished with an empty final batch" to null for callers
      if (results != null && results.isEmpty() && scanState.finished) {
        results = null;
      }
      return results;
    } catch (InterruptedException ex) {
      TraceUtil.setException(parent, ex, true);
      throw new AccumuloException(ex);
    } finally {
      parent.end();
    }
  }

  /**
   * Executes one scan RPC. For scans eligible to run on scan servers, chooses between reusing the
   * previous scan server, consulting the {@link ScanServerSelector}, or falling back to the tablet
   * server, and records the outcome of each scan server attempt.
   */
  private static List<KeyValue> scan(TabletLocation loc, ScanState scanState, ClientContext context)
      throws AccumuloSecurityException, NotServingTabletException, TException,
      NoSuchScanIDException, TooManyFilesException, TSampleNotPresentException {
    if (scanState.finished) {
      return null;
    }
    if (scanState.runOnScanServer) {
      TabletLocation newLoc;
      var tabletId = new TabletIdImpl(loc.getExtent());
      if (scanState.scanID != null && scanState.prevLoc != null
          && scanState.prevLoc.getTserverSession().equals("scan_server")
          && scanState.prevLoc.getExtent().equals(loc.getExtent())) {
        // this is the case of continuing a scan on a scan server for the same tablet, so lets not
        // call the scan server selector and just go back to the previous scan server
        newLoc = scanState.prevLoc;
        log.trace(
            "For tablet {} continuing scan on scan server {} without consulting scan server selector, using busyTimeout {}",
            loc.getExtent(), newLoc.getTserverLocation(), scanState.busyTimeout);
      } else {
        // obtain a snapshot once and only expose this snapshot to the plugin for consistency
        var attempts = scanState.scanAttempts.snapshot();
        var params = new ScanServerSelector.SelectorParameters() {
          @Override
          public List<TabletId> getTablets() {
            return List.of(tabletId);
          }
          @Override
          public Collection<? extends ScanServerAttempt> getAttempts(TabletId tabletId) {
            return attempts.getOrDefault(tabletId, Set.of());
          }
          @Override
          public Map<String,String> getHints() {
            if (scanState.executionHints == null) {
              return Map.of();
            }
            return scanState.executionHints;
          }
        };
        ScanServerSelections actions = context.getScanServerSelector().selectServers(params);
        Duration delay = null;
        String scanServer = actions.getScanServer(tabletId);
        if (scanServer != null) {
          newLoc = new TabletLocation(loc.getExtent(), scanServer, "scan_server");
          delay = actions.getDelay();
          scanState.busyTimeout = actions.getBusyTimeout();
          log.trace(
              "For tablet {} scan server selector chose scan_server:{} delay:{} busyTimeout:{}",
              loc.getExtent(), scanServer, delay, scanState.busyTimeout);
        } else {
          // selector declined to pick a scan server; use the tablet server location as-is
          newLoc = loc;
          delay = actions.getDelay();
          scanState.busyTimeout = Duration.ZERO;
          log.trace("For tablet {} scan server selector chose tablet_server", loc.getExtent());
        }
        if (!delay.isZero()) {
          try {
            Thread.sleep(delay.toMillis());
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
          }
        }
      }
      var reporter = scanState.scanAttempts.createReporter(newLoc.getTserverLocation(), tabletId);
      try {
        return scanRpc(newLoc, scanState, context, scanState.busyTimeout.toMillis());
      } catch (ScanServerBusyException ssbe) {
        reporter.report(ScanServerAttempt.Result.BUSY);
        throw ssbe;
      } catch (Exception e) {
        reporter.report(ScanServerAttempt.Result.ERROR);
        throw e;
      }
    } else {
      return scanRpc(loc, scanState, context, 0L);
    }
  }

  /**
   * Performs the actual start-scan or continue-scan RPC against the server in {@code loc},
   * updating scanState (session id, remaining range, next start row, finished flag) from the
   * server's response.
   */
  private static List<KeyValue> scanRpc(TabletLocation loc, ScanState scanState,
      ClientContext context, long busyTimeout) throws AccumuloSecurityException,
      NotServingTabletException, TException, NoSuchScanIDException, TooManyFilesException,
      TSampleNotPresentException, ScanServerBusyException {
    OpTimer timer = null;
    final TInfo tinfo = TraceUtil.traceInfo();
    final HostAndPort parsedLocation = HostAndPort.fromString(loc.getTserverLocation());
    TabletScanClientService.Client client =
        ThriftUtil.getClient(ThriftClientTypes.TABLET_SCAN, parsedLocation, context);
    String old = Thread.currentThread().getName();
    try {
      ScanResult sr;
      // a scan session id is only usable on the server that issued it; if the location
      // changed since the last RPC, force a new session to be started
      if (scanState.prevLoc != null && !scanState.prevLoc.equals(loc)) {
        scanState.scanID = null;
      }
      scanState.prevLoc = loc;
      if (scanState.scanID == null) {
        // no open session on this server; start a new scan
        Thread.currentThread().setName("Starting scan tserver=" + loc.getTserverLocation()
            + " tableId=" + loc.getExtent().tableId());
        if (log.isTraceEnabled()) {
          String msg = "Starting scan tserver=" + loc.getTserverLocation() + " tablet="
              + loc.getExtent() + " range=" + scanState.range + " ssil="
              + scanState.serverSideIteratorList + " ssio=" + scanState.serverSideIteratorOptions
              + " context=" + scanState.classLoaderContext;
          log.trace("tid={} {}", Thread.currentThread().getId(), msg);
          timer = new OpTimer().start();
        }
        TabletType ttype = TabletType.type(loc.getExtent());
        boolean waitForWrites =
            !serversWaitedForWrites.get(ttype).contains(loc.getTserverLocation());
        InitialScan is = client.startScan(tinfo, scanState.context.rpcCreds(),
            loc.getExtent().toThrift(), scanState.range.toThrift(),
            scanState.columns.stream().map(Column::toThrift).collect(Collectors.toList()),
            scanState.size, scanState.serverSideIteratorList, scanState.serverSideIteratorOptions,
            scanState.authorizations.getAuthorizationsBB(), waitForWrites, scanState.isolated,
            scanState.readaheadThreshold,
            SamplerConfigurationImpl.toThrift(scanState.samplerConfig), scanState.batchTimeOut,
            scanState.classLoaderContext, scanState.executionHints, busyTimeout);
        if (waitForWrites) {
          serversWaitedForWrites.get(ttype).add(loc.getTserverLocation());
        }
        sr = is.result;
        if (sr.more) {
          // keep the session open for a subsequent continueScan
          scanState.scanID = is.scanID;
        } else {
          client.closeScan(tinfo, is.scanID);
        }
      } else {
        // log.debug("Calling continue scan : "+scanState.range+" loc = "+loc);
        String msg =
            "Continuing scan tserver=" + loc.getTserverLocation() + " scanid=" + scanState.scanID;
        Thread.currentThread().setName(msg);
        if (log.isTraceEnabled()) {
          log.trace("tid={} {}", Thread.currentThread().getId(), msg);
          timer = new OpTimer().start();
        }
        sr = client.continueScan(tinfo, scanState.scanID, busyTimeout);
        if (!sr.more) {
          client.closeScan(tinfo, scanState.scanID);
          scanState.scanID = null;
        }
      }
      if (sr.more) {
        if (timer != null) {
          timer.stop();
          log.trace("tid={} Finished scan in {} #results={} scanid={}",
              Thread.currentThread().getId(), String.format("%.3f secs", timer.scale(SECONDS)),
              sr.results.size(), scanState.scanID);
        }
      } else {
        // log.debug("No more : tab end row = "+loc.tablet_extent.getEndRow()+" range =
        // "+scanState.range);
        if (loc.getExtent().endRow() == null) {
          // last tablet of the table, so the whole scan is done
          scanState.finished = true;
          if (timer != null) {
            timer.stop();
            log.trace("tid={} Completely finished scan in {} #results={}",
                Thread.currentThread().getId(), String.format("%.3f secs", timer.scale(SECONDS)),
                sr.results.size());
          }
        } else if (scanState.range.getEndKey() == null || !scanState.range
            .afterEndKey(new Key(loc.getExtent().endRow()).followingKey(PartialKey.ROW))) {
          // this tablet is exhausted but the range continues; advance to the next tablet
          scanState.startRow = loc.getExtent().endRow();
          scanState.skipStartRow = true;
          if (timer != null) {
            timer.stop();
            log.trace("tid={} Finished scanning tablet in {} #results={}",
                Thread.currentThread().getId(), String.format("%.3f secs", timer.scale(SECONDS)),
                sr.results.size());
          }
        } else {
          // the range ends within this tablet, so the scan is done
          scanState.finished = true;
          if (timer != null) {
            timer.stop();
            log.trace("tid={} Completely finished in {} #results={}",
                Thread.currentThread().getId(), String.format("%.3f secs", timer.scale(SECONDS)),
                sr.results.size());
          }
        }
      }
      Key.decompress(sr.results);
      // shrink the remaining range to start just after the last key returned
      if (!sr.results.isEmpty() && !scanState.finished) {
        scanState.range = new Range(new Key(sr.results.get(sr.results.size() - 1).key), false,
            scanState.range.getEndKey(), scanState.range.isEndKeyInclusive());
      }
      List<KeyValue> results = new ArrayList<>(sr.results.size());
      for (TKeyValue tkv : sr.results) {
        results.add(new KeyValue(new Key(tkv.key), tkv.value));
      }
      return results;
    } catch (ThriftSecurityException e) {
      throw new AccumuloSecurityException(e.user, e.code, e);
    } finally {
      ThriftUtil.returnClient(client, context);
      Thread.currentThread().setName(old);
    }
  }

  /**
   * Best-effort close of an active server-side scan session, if one exists. Failures are logged
   * and otherwise ignored.
   */
  static void close(ScanState scanState) {
    if (!scanState.finished && scanState.scanID != null && scanState.prevLoc != null) {
      TInfo tinfo = TraceUtil.traceInfo();
      log.debug("Closing active scan {} {}", scanState.prevLoc, scanState.scanID);
      HostAndPort parsedLocation = HostAndPort.fromString(scanState.prevLoc.getTserverLocation());
      TabletScanClientService.Client client = null;
      try {
        client =
            ThriftUtil.getClient(ThriftClientTypes.TABLET_SCAN, parsedLocation, scanState.context);
        client.closeScan(tinfo, scanState.scanID);
      } catch (TException e) {
        // ignore this is a best effort
        log.debug("Failed to close active scan " + scanState.prevLoc + " " + scanState.scanID, e);
      } finally {
        if (client != null) {
          ThriftUtil.returnClient(client, scanState.context);
        }
      }
    }
  }
}
| 9,829 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TabletType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Collection;
import org.apache.accumulo.core.dataImpl.KeyExtent;
/**
 * Classifies a tablet as the root tablet, a metadata tablet, or a user-table tablet.
 */
public enum TabletType {
  ROOT, METADATA, USER;

  public static TabletType type(KeyExtent ke) {
    // check root first, then metadata, otherwise it is a user tablet
    if (ke.isRootTablet()) {
      return ROOT;
    }
    return ke.isMeta() ? METADATA : USER;
  }

  /**
   * Determines the single type shared by all given extents.
   *
   * @throws IllegalArgumentException if the collection is empty or mixes tablet types
   */
  public static TabletType type(Collection<KeyExtent> extents) {
    if (extents.isEmpty()) {
      throw new IllegalArgumentException();
    }
    TabletType common = null;
    for (KeyExtent extent : extents) {
      TabletType current = type(extent);
      if (common == null) {
        common = current;
      } else if (common != current) {
        throw new IllegalArgumentException(
            "multiple extent types not allowed " + common + " " + current);
      }
    }
    return common;
  }
}
| 9,830 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ImportConfigurationImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.client.admin.ImportConfiguration;
import com.google.common.base.Preconditions;
/**
 * Combined builder and result object for table-import options. The same instance serves as both
 * the {@link ImportConfiguration.Builder} and the built {@link ImportConfiguration}; a single
 * flag enforces that setters run only before {@link #build()} and getters only after.
 */
public class ImportConfigurationImpl implements ImportConfiguration, ImportConfiguration.Builder {
  private static final String ERR_ALREADY_BUILT = "ImportConfiguration was already built";
  private static final String ERR_NOT_BUILT = "ImportConfiguration was not built yet";

  // flips to true once build() is called, freezing further mutation
  private boolean built = false;
  private boolean keepOffline = false;
  private boolean keepMappingsFile = false;

  @Override
  public ImportConfiguration.Builder setKeepOffline(boolean keepOffline) {
    Preconditions.checkState(!built, ERR_ALREADY_BUILT);
    this.keepOffline = keepOffline;
    return this;
  }

  @Override
  public Builder setKeepMappings(boolean keepMappings) {
    Preconditions.checkState(!built, ERR_ALREADY_BUILT);
    this.keepMappingsFile = keepMappings;
    return this;
  }

  @Override
  public ImportConfiguration build() {
    built = true;
    return this;
  }

  @Override
  public boolean isKeepOffline() {
    Preconditions.checkState(built, ERR_NOT_BUILT);
    return keepOffline;
  }

  @Override
  public boolean isKeepMappings() {
    Preconditions.checkState(built, ERR_NOT_BUILT);
    return keepMappingsFile;
  }
}
| 9,831 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/AccumuloBulkMergeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.client.AccumuloException;
/**
* Internal class indicating a concurrent merge occurred during the new bulk import.
*/
public class AccumuloBulkMergeException extends AccumuloException {
  private static final long serialVersionUID = 1L;
  // fixed message; the interesting detail travels in the wrapped cause
  private static final String MSG = "Concurrent merge happened";

  /**
   * @param cause the underlying failure observed while the concurrent merge occurred
   */
  public AccumuloBulkMergeException(final Throwable cause) {
    super(MSG, cause);
  }
}
| 9,832 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.hadoop.io.Text;
/**
 * Base implementation of {@link ScannerBase} holding the client-side options shared by scanner
 * types: server-side iterators and their options, fetched columns, timeouts, sampler
 * configuration, classloader context, execution hints, and consistency level. Mutator methods are
 * synchronized so options can be configured safely from multiple threads; {@link #iterator()} is
 * unsupported here and must be provided by subclasses.
 */
public class ScannerOptions implements ScannerBase {
  // start as immutable empties and are lazily replaced with mutable collections on first write
  protected List<IterInfo> serverSideIteratorList = Collections.emptyList();
  protected Map<String,Map<String,String>> serverSideIteratorOptions = Collections.emptyMap();
  protected SortedSet<Column> fetchedColumns = new TreeSet<>();
  // Long.MAX_VALUE encodes "no timeout" for both values below
  protected long retryTimeout = Long.MAX_VALUE;
  protected long batchTimeout = Long.MAX_VALUE;
  private String regexIterName = null;
  private SamplerConfiguration samplerConfig = null;
  protected String classLoaderContext = null;
  // kept immutable; setExecutionHints always replaces the whole map
  protected Map<String,String> executionHints = Collections.emptyMap();
  private ConsistencyLevel consistencyLevel = ConsistencyLevel.IMMEDIATE;

  protected ScannerOptions() {}

  /** Copy constructor: snapshots all options from {@code so} into this instance. */
  public ScannerOptions(ScannerOptions so) {
    setOptions(this, so);
  }

  /**
   * Adds a server-side iterator; both the iterator name and its priority must be unused.
   *
   * @throws IllegalArgumentException if the name or priority is already taken
   */
  @Override
  public synchronized void addScanIterator(IteratorSetting si) {
    checkArgument(si != null, "si is null");
    if (serverSideIteratorList.isEmpty()) {
      serverSideIteratorList = new ArrayList<>();
    }
    for (IterInfo ii : serverSideIteratorList) {
      if (ii.iterName.equals(si.getName())) {
        throw new IllegalArgumentException("Iterator name is already in use " + si.getName());
      }
      if (ii.getPriority() == si.getPriority()) {
        throw new IllegalArgumentException(
            "Iterator priority is already in use " + si.getPriority());
      }
    }
    serverSideIteratorList.add(new IterInfo(si.getPriority(), si.getIteratorClass(), si.getName()));
    if (serverSideIteratorOptions.isEmpty()) {
      serverSideIteratorOptions = new HashMap<>();
    }
    serverSideIteratorOptions.computeIfAbsent(si.getName(), k -> new HashMap<>())
        .putAll(si.getOptions());
  }

  /** Removes the named iterator and its options; a no-op if it was never added. */
  @Override
  public synchronized void removeScanIterator(String iteratorName) {
    checkArgument(iteratorName != null, "iteratorName is null");
    // if no iterators are set, we don't have it, so it is already removed
    if (serverSideIteratorList.isEmpty()) {
      return;
    }
    for (IterInfo ii : serverSideIteratorList) {
      if (ii.iterName.equals(iteratorName)) {
        serverSideIteratorList.remove(ii);
        break;
      }
    }
    serverSideIteratorOptions.remove(iteratorName);
  }

  /**
   * Sets one option on the named iterator; the iterator need not have been added yet (the option
   * map is created on demand).
   */
  @Override
  public synchronized void updateScanIteratorOption(String iteratorName, String key, String value) {
    checkArgument(iteratorName != null, "iteratorName is null");
    checkArgument(key != null, "key is null");
    checkArgument(value != null, "value is null");
    if (serverSideIteratorOptions.isEmpty()) {
      serverSideIteratorOptions = new HashMap<>();
    }
    serverSideIteratorOptions.computeIfAbsent(iteratorName, k -> new HashMap<>()).put(key, value);
  }

  /** Restricts the scan to a whole column family. */
  @Override
  public synchronized void fetchColumnFamily(Text col) {
    checkArgument(col != null, "col is null");
    Column c = new Column(TextUtil.getBytes(col), null, null);
    fetchedColumns.add(c);
  }

  /** Restricts the scan to a specific family/qualifier pair. */
  @Override
  public synchronized void fetchColumn(Text colFam, Text colQual) {
    checkArgument(colFam != null, "colFam is null");
    checkArgument(colQual != null, "colQual is null");
    Column c = new Column(TextUtil.getBytes(colFam), TextUtil.getBytes(colQual), null);
    fetchedColumns.add(c);
  }

  @Override
  public void fetchColumn(IteratorSetting.Column column) {
    checkArgument(column != null, "Column is null");
    fetchColumn(column.getColumnFamily(), column.getColumnQualifier());
  }

  @Override
  public synchronized void clearColumns() {
    fetchedColumns.clear();
  }

  // NOTE(review): returns the live internal set, not a copy — callers can observe/affect it
  public synchronized SortedSet<Column> getFetchedColumns() {
    return fetchedColumns;
  }

  /** Drops all configured iterators and their options. */
  @Override
  public synchronized void clearScanIterators() {
    serverSideIteratorList = Collections.emptyList();
    serverSideIteratorOptions = Collections.emptyMap();
    regexIterName = null;
  }

  /**
   * Deep-copies all options from {@code src} into {@code dst}, locking both so the snapshot is
   * internally consistent.
   */
  protected static void setOptions(ScannerOptions dst, ScannerOptions src) {
    synchronized (dst) {
      synchronized (src) {
        dst.regexIterName = src.regexIterName;
        dst.fetchedColumns = new TreeSet<>(src.fetchedColumns);
        dst.serverSideIteratorList = new ArrayList<>(src.serverSideIteratorList);
        dst.classLoaderContext = src.classLoaderContext;
        dst.serverSideIteratorOptions = new HashMap<>();
        Set<Entry<String,Map<String,String>>> es = src.serverSideIteratorOptions.entrySet();
        for (Entry<String,Map<String,String>> entry : es) {
          dst.serverSideIteratorOptions.put(entry.getKey(), new HashMap<>(entry.getValue()));
        }
        dst.samplerConfig = src.samplerConfig;
        dst.batchTimeout = src.batchTimeout;
        // its an immutable map, so can avoid copy here
        dst.executionHints = src.executionHints;
        dst.consistencyLevel = src.consistencyLevel;
      }
    }
  }

  @Override
  public Iterator<Entry<Key,Value>> iterator() {
    throw new UnsupportedOperationException();
  }

  /**
   * Sets the retry timeout; 0 means "no timeout" (stored as Long.MAX_VALUE).
   * NOTE(review): the error message says "must be positive" but only negative values throw.
   */
  @Override
  public synchronized void setTimeout(long timeout, TimeUnit timeUnit) {
    if (timeout < 0) {
      throw new IllegalArgumentException("retry timeout must be positive : " + timeout);
    }
    if (timeout == 0) {
      this.retryTimeout = Long.MAX_VALUE;
    } else {
      this.retryTimeout = timeUnit.toMillis(timeout);
    }
  }

  @Override
  public synchronized long getTimeout(TimeUnit timeunit) {
    return timeunit.convert(retryTimeout, MILLISECONDS);
  }

  @Override
  public void close() {
    // Nothing needs to be closed
  }

  @Override
  public Authorizations getAuthorizations() {
    throw new UnsupportedOperationException("No authorizations to return");
  }

  @Override
  public synchronized void setSamplerConfiguration(SamplerConfiguration samplerConfig) {
    requireNonNull(samplerConfig);
    this.samplerConfig = samplerConfig;
  }

  @Override
  public synchronized SamplerConfiguration getSamplerConfiguration() {
    return samplerConfig;
  }

  @Override
  public synchronized void clearSamplerConfiguration() {
    this.samplerConfig = null;
  }

  /** Sets the batch timeout; 0 means "no timeout" (stored as Long.MAX_VALUE). */
  @Override
  public void setBatchTimeout(long timeout, TimeUnit timeUnit) {
    if (timeout < 0) {
      throw new IllegalArgumentException("Batch timeout must be positive : " + timeout);
    }
    if (timeout == 0) {
      this.batchTimeout = Long.MAX_VALUE;
    } else {
      this.batchTimeout = timeUnit.toMillis(timeout);
    }
  }

  @Override
  public long getBatchTimeout(TimeUnit timeUnit) {
    return timeUnit.convert(batchTimeout, MILLISECONDS);
  }

  @Override
  public void setClassLoaderContext(String classLoaderContext) {
    requireNonNull(classLoaderContext, "classloader context name cannot be null");
    this.classLoaderContext = classLoaderContext;
  }

  @Override
  public void clearClassLoaderContext() {
    this.classLoaderContext = null;
  }

  @Override
  public String getClassLoaderContext() {
    return this.classLoaderContext;
  }

  /** Replaces all execution hints with an immutable copy of {@code hints}. */
  @Override
  public synchronized void setExecutionHints(Map<String,String> hints) {
    this.executionHints = Map.copyOf(Objects.requireNonNull(hints));
  }

  @Override
  public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
  }

  @Override
  public void setConsistencyLevel(ConsistencyLevel level) {
    this.consistencyLevel = Objects.requireNonNull(level);
  }
}
| 9,833 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/CloneConfigurationImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.client.admin.CloneConfiguration;
import com.google.common.base.Preconditions;
/**
* A {@link CloneConfiguration} implementation which also implements the builder thereof
*
* @since 1.10 and 2.1
*/
/**
 * A {@link CloneConfiguration} implementation which also implements the builder thereof
 *
 * @since 1.10 and 2.1
 */
public class CloneConfigurationImpl implements CloneConfiguration, CloneConfiguration.Builder {
  // error messages for the build-state checks, matching the style of ImportConfigurationImpl
  private static final String BUILT_ERROR_MSG = "CloneConfiguration was already built";
  private static final String NOT_BUILT_ERROR_MSG = "CloneConfiguration was not built yet";

  // This boolean allows building an immutable CloneConfiguration object without creating
  // separate Builder and CloneConfiguration objects. This is done to reduce object creation and
  // copying. This could easily be changed to two objects without changing the interfaces.
  private boolean built = false;

  // determines if memory is flushed in the source table before cloning.
  private boolean flush = true;

  // the sources table properties are copied, this allows overriding of those properties
  private Map<String,String> propertiesToSet = null;

  // do not copy these properties from the source table, just revert to system defaults
  private Set<String> propertiesToExclude = null;

  // do not bring the table online after cloning
  private boolean keepOffline = false;

  public CloneConfigurationImpl() {}

  @Override
  public boolean isFlush() {
    Preconditions.checkState(built, NOT_BUILT_ERROR_MSG);
    return flush;
  }

  /** Returns an unmodifiable view of the property overrides; empty map when none were set. */
  @Override
  public Map<String,String> getPropertiesToSet() {
    Preconditions.checkState(built, NOT_BUILT_ERROR_MSG);
    return (propertiesToSet == null ? Collections.emptyMap()
        : Collections.unmodifiableMap(propertiesToSet));
  }

  /** Returns an unmodifiable view of the excluded properties; empty set when none were set. */
  @Override
  public Set<String> getPropertiesToExclude() {
    Preconditions.checkState(built, NOT_BUILT_ERROR_MSG);
    return (propertiesToExclude == null ? Collections.emptySet()
        : Collections.unmodifiableSet(propertiesToExclude));
  }

  @Override
  public boolean isKeepOffline() {
    Preconditions.checkState(built, NOT_BUILT_ERROR_MSG);
    return keepOffline;
  }

  @Override
  public Builder setFlush(boolean flush) {
    Preconditions.checkState(!built, BUILT_ERROR_MSG);
    this.flush = flush;
    return this;
  }

  @Override
  public Builder setPropertiesToSet(Map<String,String> propertiesToSet) {
    Preconditions.checkState(!built, BUILT_ERROR_MSG);
    this.propertiesToSet = propertiesToSet;
    return this;
  }

  @Override
  public Builder setPropertiesToExclude(Set<String> propertiesToExclude) {
    Preconditions.checkState(!built, BUILT_ERROR_MSG);
    this.propertiesToExclude = propertiesToExclude;
    return this;
  }

  @Override
  public Builder setKeepOffline(boolean keepOffline) {
    Preconditions.checkState(!built, BUILT_ERROR_MSG);
    this.keepOffline = keepOffline;
    return this;
  }

  /** Freezes this configuration; setters throw and getters become usable after this call. */
  @Override
  public CloneConfiguration build() {
    Preconditions.checkState(!built, BUILT_ERROR_MSG);
    built = true;
    return this;
  }

  @Override
  public String toString() {
    return "{flush=" + flush + ", propertiesToSet=" + propertiesToSet + ", propertiesToExclude="
        + propertiesToExclude + ", keepOffline=" + keepOffline + ", built=" + built + "}";
  }
}
| 9,834 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ActiveCompactionImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.ActiveCompaction;
import org.apache.accumulo.core.client.admin.ActiveCompaction.CompactionHost.Type;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import com.google.common.net.HostAndPort;
/**
* @since 1.6.0
*/
/**
 * Client-side view of a compaction running on a server, backed by the thrift
 * {@code ActiveCompaction} struct returned by the server. All state is supplied at construction,
 * so the fields are final.
 *
 * @since 1.6.0
 */
public class ActiveCompactionImpl extends ActiveCompaction {
  private final org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction tac;
  private final ClientContext context;
  // address and host type of the server running this compaction
  private final HostAndPort hostport;
  private final Type type;

  ActiveCompactionImpl(ClientContext context,
      org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction tac, HostAndPort hostport,
      CompactionHost.Type type) {
    this.tac = tac;
    this.context = context;
    this.hostport = hostport;
    this.type = type;
  }

  /** Resolves the compacting tablet's table id to its current table name. */
  @Override
  public String getTable() throws TableNotFoundException {
    return context.getTableName(KeyExtent.fromThrift(tac.getExtent()).tableId());
  }

  @Override
  public TabletId getTablet() {
    return new TabletIdImpl(KeyExtent.fromThrift(tac.getExtent()));
  }

  @Override
  public long getAge() {
    return tac.getAge();
  }

  @Override
  public List<String> getInputFiles() {
    return tac.getInputFiles();
  }

  @Override
  public String getOutputFile() {
    return tac.getOutputFile();
  }

  // the thrift enums mirror the public API enums by name, so valueOf(name()) is the mapping
  @Override
  public CompactionType getType() {
    return CompactionType.valueOf(tac.getType().name());
  }

  @Override
  public CompactionReason getReason() {
    return CompactionReason.valueOf(tac.getReason().name());
  }

  @Override
  public String getLocalityGroup() {
    return tac.getLocalityGroup();
  }

  @Override
  public long getEntriesRead() {
    return tac.getEntriesRead();
  }

  @Override
  public long getEntriesWritten() {
    return tac.getEntriesWritten();
  }

  @Override
  public long getPausedCount() {
    return tac.getTimesPaused();
  }

  /** Rebuilds {@link IteratorSetting}s from the thrift iterator list and per-iterator options. */
  @Override
  public List<IteratorSetting> getIterators() {
    ArrayList<IteratorSetting> ret = new ArrayList<>();
    for (IterInfo ii : tac.getSsiList()) {
      IteratorSetting settings =
          new IteratorSetting(ii.getPriority(), ii.getIterName(), ii.getClassName());
      Map<String,String> options = tac.getSsio().get(ii.getIterName());
      settings.addOptions(options);
      ret.add(settings);
    }
    return ret;
  }

  /** Exposes the server's address, port, and host type as a {@link CompactionHost}. */
  @Override
  public CompactionHost getHost() {
    return new CompactionHost() {
      @Override
      public Type getType() {
        return type;
      }

      @Override
      public String getAddress() {
        return hostport.getHost();
      }

      @Override
      public int getPort() {
        return hostport.getPort();
      }
    };
  }
}
| 9,835 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/NamespaceOperationsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.util.Validators.EXISTING_NAMESPACE_NAME;
import static org.apache.accumulo.core.util.Validators.NEW_NAMESPACE_NAME;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.NamespaceExistsException;
import org.apache.accumulo.core.client.NamespaceNotEmptyException;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.clientImpl.thrift.TVersionedProperties;
import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.data.NamespaceId;
import org.apache.accumulo.core.data.constraints.Constraint;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.manager.thrift.FateOperation;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.accumulo.core.util.Retry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class NamespaceOperationsImpl extends NamespaceOperationsHelper {
  // shared client state (connection info, credentials, caches) for all operations
  private final ClientContext context;
  // delegate used to run namespace FATE operations through the table-operations RPC path
  private TableOperationsImpl tableOps;
  // NOTE(review): logger is deliberately(?) named after TableOperations, not this class — confirm
  private static final Logger log = LoggerFactory.getLogger(TableOperations.class);

  public NamespaceOperationsImpl(ClientContext context, TableOperationsImpl tableOps) {
    checkArgument(context != null, "context is null");
    this.context = context;
    this.tableOps = tableOps;
  }
@Override
public SortedSet<String> list() {
OpTimer timer = null;
if (log.isTraceEnabled()) {
log.trace("tid={} Fetching list of namespaces...", Thread.currentThread().getId());
timer = new OpTimer().start();
}
TreeSet<String> namespaces = new TreeSet<>(Namespaces.getNameToIdMap(context).keySet());
if (timer != null) {
timer.stop();
log.trace("tid={} Fetched {} namespaces in {}", Thread.currentThread().getId(),
namespaces.size(), String.format("%.3f secs", timer.scale(SECONDS)));
}
return namespaces;
}
@Override
public boolean exists(String namespace) {
EXISTING_NAMESPACE_NAME.validate(namespace);
OpTimer timer = null;
if (log.isTraceEnabled()) {
log.trace("tid={} Checking if namespace {} exists", Thread.currentThread().getId(),
namespace);
timer = new OpTimer().start();
}
boolean exists = Namespaces.namespaceNameExists(context, namespace);
if (timer != null) {
timer.stop();
log.trace("tid={} Checked existence of {} in {}", Thread.currentThread().getId(), exists,
String.format("%.3f secs", timer.scale(SECONDS)));
}
return exists;
}
@Override
public void create(String namespace)
throws AccumuloException, AccumuloSecurityException, NamespaceExistsException {
NEW_NAMESPACE_NAME.validate(namespace);
try {
doNamespaceFateOperation(FateOperation.NAMESPACE_CREATE,
Arrays.asList(ByteBuffer.wrap(namespace.getBytes(UTF_8))), Collections.emptyMap(),
namespace);
} catch (NamespaceNotFoundException e) {
// should not happen
throw new AssertionError(e);
}
}
  /**
   * Deletes an empty user namespace via a FATE operation. The built-in {@code accumulo} and
   * default namespaces can never be deleted, and the namespace must contain no tables.
   */
  @Override
  public void delete(String namespace) throws AccumuloException, AccumuloSecurityException,
      NamespaceNotFoundException, NamespaceNotEmptyException {
    EXISTING_NAMESPACE_NAME.validate(namespace);
    NamespaceId namespaceId = Namespaces.getNamespaceId(context, namespace);
    // refuse to delete the built-in namespaces, reported as a security error
    if (namespaceId.equals(Namespace.ACCUMULO.id()) || namespaceId.equals(Namespace.DEFAULT.id())) {
      Credentials credentials = context.getCredentials();
      log.debug("{} attempted to delete the {} namespace", credentials.getPrincipal(), namespaceId);
      throw new AccumuloSecurityException(credentials.getPrincipal(),
          SecurityErrorCode.UNSUPPORTED_OPERATION);
    }
    // client-side pre-check; the namespace must hold no tables before it can be removed
    if (!Namespaces.getTableIds(context, namespaceId).isEmpty()) {
      throw new NamespaceNotEmptyException(namespaceId.canonical(), namespace, null);
    }
    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(namespace.getBytes(UTF_8)));
    Map<String,String> opts = new HashMap<>();
    try {
      doNamespaceFateOperation(FateOperation.NAMESPACE_DELETE, args, opts, namespace);
    } catch (NamespaceExistsException e) {
      // a delete operation can never report "namespace exists"
      throw new AssertionError(e);
    }
  }
@Override
public void rename(String oldNamespaceName, String newNamespaceName)
throws AccumuloSecurityException, NamespaceNotFoundException, AccumuloException,
NamespaceExistsException {
EXISTING_NAMESPACE_NAME.validate(oldNamespaceName);
NEW_NAMESPACE_NAME.validate(newNamespaceName);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(oldNamespaceName.getBytes(UTF_8)),
ByteBuffer.wrap(newNamespaceName.getBytes(UTF_8)));
Map<String,String> opts = new HashMap<>();
doNamespaceFateOperation(FateOperation.NAMESPACE_RENAME, args, opts, oldNamespaceName);
}
  /**
   * Sets a single property on the namespace via the manager, then warns if the change produced an
   * inconsistent locality-group configuration.
   */
  @Override
  public void setProperty(final String namespace, final String property, final String value)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(property != null, "property is null");
    checkArgument(value != null, "value is null");
    try {
      ThriftClientTypes.MANAGER.executeVoidTableCommand(context,
          client -> client.setNamespaceProperty(TraceUtil.traceInfo(), context.rpcCreds(),
              namespace, property, value));
    } catch (TableNotFoundException e) {
      // the shared table-command path reports namespace-not-found wrapped in
      // TableNotFoundException; unwrap it so callers see the declared exception type
      if (e.getCause() instanceof NamespaceNotFoundException) {
        throw (NamespaceNotFoundException) e.getCause();
      } else {
        throw new AccumuloException(e);
      }
    }
    checkLocalityGroups(namespace, property);
  }
  /**
   * Single attempt at a read-modify-write of the namespace's properties using versioned
   * properties for optimistic concurrency; the server rejects the write (surfacing a
   * ConcurrentModificationException to the caller) if the version changed since the read.
   *
   * @return the property map that was sent to the server
   */
  private Map<String,String> tryToModifyProperties(final String namespace,
      final Consumer<Map<String,String>> mapMutator)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    // read current properties plus their version stamp
    final TVersionedProperties vProperties =
        ThriftClientTypes.CLIENT.execute(context, client -> client
            .getVersionedNamespaceProperties(TraceUtil.traceInfo(), context.rpcCreds(), namespace));
    mapMutator.accept(vProperties.getProperties());
    // A reference to the map was passed to the user, maybe they still have the reference and are
    // modifying it. Buggy Accumulo code could attempt to make modifications to the map after this
    // point. Because of these potential issues, create an immutable snapshot of the map so that
    // from here on the code is assured to always be dealing with the same map.
    vProperties.setProperties(Map.copyOf(vProperties.getProperties()));
    try {
      // Send to server
      ThriftClientTypes.MANAGER.executeVoidTableCommand(context,
          client -> client.modifyNamespaceProperties(TraceUtil.traceInfo(), context.rpcCreds(),
              namespace, vProperties));
      // warn about any locality-group inconsistency introduced by the modified keys
      for (String property : vProperties.getProperties().keySet()) {
        checkLocalityGroups(namespace, property);
      }
    } catch (TableNotFoundException e) {
      // unwrap namespace-not-found from the shared table-command exception
      if (e.getCause() instanceof NamespaceNotFoundException) {
        throw (NamespaceNotFoundException) e.getCause();
      } else {
        throw new AccumuloException(e);
      }
    }
    return vProperties.getProperties();
  }
  /**
   * Atomically modifies the namespace's properties by retrying
   * {@link #tryToModifyProperties(String, Consumer)} with backoff until the optimistic
   * read-modify-write succeeds.
   *
   * @return the property map that was accepted by the server
   */
  @Override
  public Map<String,String> modifyProperties(final String namespace,
      final Consumer<Map<String,String>> mapMutator)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(mapMutator != null, "mapMutator is null");
    // exponential backoff from 25ms up to 30s, logging at most every 3 minutes
    Retry retry =
        Retry.builder().infiniteRetries().retryAfter(25, MILLISECONDS).incrementBy(25, MILLISECONDS)
            .maxWait(30, SECONDS).backOffFactor(1.5).logInterval(3, MINUTES).createRetry();
    while (true) {
      try {
        var props = tryToModifyProperties(namespace, mapMutator);
        retry.logCompletion(log, "Modifying properties for namespace " + namespace);
        return props;
      } catch (ConcurrentModificationException cme) {
        // another writer changed the properties between our read and write; back off and retry
        try {
          retry.logRetry(log, "Unable to modify namespace properties for " + namespace
              + " because of concurrent modification");
          retry.waitForNextAttempt(log, "Modify namespace properties for " + namespace);
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      } finally {
        // record that an attempt was consumed, whether it succeeded or not
        retry.useRetry();
      }
    }
  }
  /**
   * Removes a property from the namespace (reverting it to its inherited/default value), then
   * warns if the change produced an inconsistent locality-group configuration.
   */
  @Override
  public void removeProperty(final String namespace, final String property)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(property != null, "property is null");
    try {
      ThriftClientTypes.MANAGER.executeVoidTableCommand(context, client -> client
          .removeNamespaceProperty(TraceUtil.traceInfo(), context.rpcCreds(), namespace, property));
    } catch (TableNotFoundException e) {
      // unwrap namespace-not-found from the shared table-command exception
      if (e.getCause() instanceof NamespaceNotFoundException) {
        throw (NamespaceNotFoundException) e.getCause();
      } else {
        throw new AccumuloException(e);
      }
    }
    checkLocalityGroups(namespace, property);
  }
  /**
   * Returns the namespace's effective configuration. Translates a thrift
   * NAMESPACE_NOTFOUND error into {@link NamespaceNotFoundException}; any other failure is
   * rethrown or wrapped in {@link AccumuloException}.
   */
  @Override
  public Map<String,String> getConfiguration(final String namespace)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    EXISTING_NAMESPACE_NAME.validate(namespace);
    try {
      return ThriftClientTypes.CLIENT.execute(context, client -> client
          .getNamespaceConfiguration(TraceUtil.traceInfo(), context.rpcCreds(), namespace));
    } catch (AccumuloSecurityException e) {
      // declared on this method, so pass it through untouched
      throw e;
    } catch (AccumuloException e) {
      // inspect the cause for the thrift "namespace not found" error code
      Throwable t = e.getCause();
      if (t instanceof ThriftTableOperationException) {
        ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
        if (ttoe.getType() == TableOperationExceptionType.NAMESPACE_NOTFOUND) {
          throw new NamespaceNotFoundException(ttoe);
        }
        throw e;
      }
      throw e;
    } catch (Exception e) {
      // anything unexpected is wrapped in the checked AccumuloException
      throw new AccumuloException(e);
    }
  }
@Override
public Map<String,String> getNamespaceProperties(String namespace)
throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
EXISTING_NAMESPACE_NAME.validate(namespace);
try {
return ThriftClientTypes.CLIENT.execute(context, client -> client
.getNamespaceProperties(TraceUtil.traceInfo(), context.rpcCreds(), namespace));
} catch (AccumuloException e) {
Throwable t = e.getCause();
if (t instanceof ThriftTableOperationException) {
ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
if (ttoe.getType() == TableOperationExceptionType.NAMESPACE_NOTFOUND) {
throw new NamespaceNotFoundException(ttoe);
}
throw e;
}
throw e;
} catch (Exception e) {
throw new AccumuloException(e);
}
}
@Override
public Map<String,String> namespaceIdMap() {
return Namespaces.getNameToIdMap(context).entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().canonical(), (v1, v2) -> {
throw new IllegalStateException(
String.format("Duplicate key for values %s and %s", v1, v2));
}, TreeMap::new));
}
  /**
   * Asks a server to verify that {@code className} can be loaded in this namespace's classloader
   * context and cast to {@code asTypeName}. Translates a thrift NAMESPACE_NOTFOUND error into
   * {@link NamespaceNotFoundException}.
   *
   * @return true if the class loads and is assignable to the requested type
   */
  @Override
  public boolean testClassLoad(final String namespace, final String className,
      final String asTypeName)
      throws NamespaceNotFoundException, AccumuloException, AccumuloSecurityException {
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(className != null, "className is null");
    checkArgument(asTypeName != null, "asTypeName is null");
    try {
      return ThriftClientTypes.CLIENT.execute(context,
          client -> client.checkNamespaceClass(TraceUtil.traceInfo(), context.rpcCreds(), namespace,
              className, asTypeName));
    } catch (AccumuloSecurityException | AccumuloException e) {
      // inspect the cause for the thrift "namespace not found" error code
      Throwable t = e.getCause();
      if (t instanceof ThriftTableOperationException) {
        ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
        if (ttoe.getType() == TableOperationExceptionType.NAMESPACE_NOTFOUND) {
          throw new NamespaceNotFoundException(ttoe);
        }
        throw e;
      }
      throw e;
    } catch (Exception e) {
      // anything unexpected is wrapped in the checked AccumuloException
      throw new AccumuloException(e);
    }
  }
@Override
public void attachIterator(String namespace, IteratorSetting setting,
    EnumSet<IteratorScope> scopes)
    throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
  // Verify the iterator class can be loaded as a SortedKeyValueIterator before
  // attaching it, so a bad class name fails fast instead of breaking scans later.
  // testClassLoad validates the namespace name
  testClassLoad(namespace, setting.getIteratorClass(), SortedKeyValueIterator.class.getName());
  super.attachIterator(namespace, setting, scopes);
}
@Override
public int addConstraint(String namespace, String constraintClassName)
    throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
  // Verify the constraint class can be loaded as a Constraint before adding it,
  // so a bad class name fails fast.
  // testClassLoad validates the namespace name
  testClassLoad(namespace, constraintClassName, Constraint.class.getName());
  return super.addConstraint(namespace, constraintClassName);
}
/**
 * Runs a namespace FATE operation by delegating to the table operations' FATE machinery,
 * translating table-specific exceptions (which cannot occur for namespace operations) into
 * an AssertionError.
 */
private String doNamespaceFateOperation(FateOperation op, List<ByteBuffer> args,
    Map<String,String> opts, String namespace) throws AccumuloSecurityException,
    AccumuloException, NamespaceExistsException, NamespaceNotFoundException {
  // caller should validate the namespace name
  try {
    return tableOps.doFateOperation(op, args, opts, namespace);
  } catch (TableExistsException | TableNotFoundException e) {
    // should not happen: these exception types are table-specific, and this path only
    // performs namespace operations
    throw new AssertionError(e);
  }
}
/**
 * If the changed property affects locality groups, validate the namespace's full locality
 * group configuration and log a warning when it is inconsistent. The property change itself
 * is never rolled back — the warning only alerts the operator to a possibly transient
 * misconfiguration while related properties are being updated one at a time.
 */
private void checkLocalityGroups(String namespace, String propChanged)
    throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
  EXISTING_NAMESPACE_NAME.validate(namespace);
  if (LocalityGroupUtil.isLocalityGroupProperty(propChanged)) {
    Map<String,String> allProps = getConfiguration(namespace);
    try {
      LocalityGroupUtil.checkLocalityGroups(allProps);
    } catch (LocalityGroupConfigurationError | RuntimeException e) {
      // Fixed missing space after the namespace name ("...'resulted" -> "...' resulted")
      LoggerFactory.getLogger(this.getClass()).warn("Changing '" + propChanged
          + "' for namespace '" + namespace
          + "' resulted in bad locality group config. This may be a transient situation since the"
          + " config spreads over multiple properties. Setting properties in a different order "
          + "may help. Even though this warning was displayed, the property was updated. Please "
          + "check your config to ensure consistency.", e);
    }
  }
}
}
| 9,836 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ZookeeperLockChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.TabletLocatorImpl.TabletServerLockChecker;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.lock.ServiceLock;
/**
 * Checks tablet server locks by consulting ZooKeeper (via the client's ZooCache) under the
 * instance's tablet servers node.
 */
public class ZookeeperLockChecker implements TabletServerLockChecker {

  private final ZooCache zc;
  private final String root;

  ZookeeperLockChecker(ClientContext context) {
    this.zc = context.getZooCache();
    this.root = context.getZooKeeperRoot() + Constants.ZTSERVERS;
  }

  @Override
  public boolean isLockHeld(String tserver, String session) {
    // Compare the session id currently holding the server's lock against the
    // client-supplied session (hex encoded).
    var lockPath = ServiceLock.path(root + "/" + tserver);
    long heldSession = ServiceLock.getSessionId(zc, lockPath);
    return heldSession == Long.parseLong(session, 16);
  }

  @Override
  public void invalidateCache(String tserver) {
    // Drop any cached ZooKeeper data for this server so the next check re-reads it.
    zc.clear(root + "/" + tserver);
  }
}
| 9,837 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOCATION;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyValue;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.IteratorConfigUtil;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SystemIteratorUtil;
import org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.accumulo.core.volume.VolumeConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
/**
 * Iterates over the data of an offline table by reading its files directly from the
 * filesystem, tablet by tablet, instead of going through tablet servers. Tablet extents and
 * file lists are obtained from the metadata table; iteration fails if the table comes back
 * online mid-scan.
 */
class OfflineIterator implements Iterator<Entry<Key,Value>> {

  /**
   * Minimal {@link IteratorEnvironment} used when running table iterators client-side over an
   * offline table's files. Always reports scan scope and never a compaction.
   */
  static class OfflineIteratorEnvironment implements IteratorEnvironment {

    private final Authorizations authorizations;
    private final AccumuloConfiguration conf;
    private final boolean useSample;
    private final SamplerConfiguration sampleConf;

    public OfflineIteratorEnvironment(Authorizations auths, AccumuloConfiguration acuTableConf,
        boolean useSample, SamplerConfiguration samplerConf) {
      this.authorizations = auths;
      this.conf = acuTableConf;
      this.useSample = useSample;
      this.sampleConf = samplerConf;
    }

    @Override
    public IteratorScope getIteratorScope() {
      return IteratorScope.scan;
    }

    @Override
    public boolean isFullMajorCompaction() {
      return false;
    }

    @Override
    public boolean isUserCompaction() {
      return false;
    }

    // Iterators registered to run above the user-configured iterator stack.
    private final ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators =
        new ArrayList<>();

    @Override
    public Authorizations getAuthorizations() {
      return authorizations;
    }

    // Wraps the given iterator with any registered top-level iterators; returns it
    // unchanged when there are none.
    SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
      if (topLevelIterators.isEmpty()) {
        return iter;
      }
      ArrayList<SortedKeyValueIterator<Key,Value>> allIters = new ArrayList<>(topLevelIterators);
      allIters.add(iter);
      return new MultiIterator(allIters, false);
    }

    @Override
    public boolean isSamplingEnabled() {
      return useSample;
    }

    @Override
    public SamplerConfiguration getSamplerConfiguration() {
      return sampleConf;
    }

    @Override
    public IteratorEnvironment cloneWithSamplingEnabled() {
      if (sampleConf == null) {
        throw new SampleNotPresentException();
      }
      return new OfflineIteratorEnvironment(authorizations, conf, true, sampleConf);
    }
  }

  // Iterator over the current tablet's data; null when iteration is exhausted.
  private SortedKeyValueIterator<Key,Value> iter;
  private Range range;
  // Extent of the tablet currently being read; null before the first tablet.
  private KeyExtent currentExtent;
  private final TableId tableId;
  private final Authorizations authorizations;
  private final ClientContext context;
  private final ScannerOptions options;
  // File readers for the current tablet; closed and replaced on each tablet switch.
  private final ArrayList<SortedKeyValueIterator<Key,Value>> readers;

  public OfflineIterator(ScannerOptions options, ClientContext context,
      Authorizations authorizations, Text table, Range range) {
    this.options = new ScannerOptions(options);
    this.context = context;
    this.range = range;
    if (!this.options.fetchedColumns.isEmpty()) {
      // Narrow the range to the fetched columns so files can be skipped where possible.
      this.range =
          range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
    }
    this.tableId = TableId.of(table.toString());
    this.authorizations = authorizations;
    this.readers = new ArrayList<>();
    try {
      // Advance past empty tablets until data is found or the range is exhausted.
      nextTablet();
      while (iter != null && !iter.hasTop()) {
        nextTablet();
      }
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
      throw new IllegalStateException(e);
    }
  }

  @Override
  public boolean hasNext() {
    return iter != null && iter.hasTop();
  }

  @Override
  public Entry<Key,Value> next() {
    try {
      byte[] v = iter.getTopValue().get();
      // copy just like tablet server does, do this before calling next
      KeyValue ret = new KeyValue(new Key(iter.getTopKey()), Arrays.copyOf(v, v.length));
      iter.next();
      // Move to the next non-empty tablet; iter becomes null when the range is exhausted.
      while (iter != null && !iter.hasTop()) {
        nextTablet();
      }
      return ret;
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
      throw new IllegalStateException(e);
    }
  }

  /**
   * Advances to the next tablet in range: looks up its metadata, waits for it to be unhosted
   * (retrying while it still has a location, verifying the table stays offline), verifies
   * extent continuity, and opens an iterator over its files. Sets {@code iter} to null when
   * there are no more tablets in range.
   */
  private void nextTablet()
      throws TableNotFoundException, AccumuloException, IOException, AccumuloSecurityException {
    Range nextRange;
    if (currentExtent == null) {
      // First tablet: start the metadata lookup at the scan range's start row.
      Text startRow;
      if (range.getStartKey() != null) {
        startRow = range.getStartKey().getRow();
      } else {
        startRow = new Text();
      }
      nextRange = new Range(TabletsSection.encodeRow(tableId, startRow), true, null, false);
    } else {
      if (currentExtent.endRow() == null
          || range.afterEndKey(new Key(currentExtent.endRow()).followingKey(PartialKey.ROW))) {
        // Last tablet of the table, or the scan range ends before the next tablet.
        iter = null;
        return;
      }
      nextRange = new Range(currentExtent.toMetaRow(), false, null, false);
    }
    TabletMetadata tablet = getTabletFiles(nextRange);
    // A tablet with a location is still hosted; wait for it to be unloaded, but fail if the
    // table has actually gone back online.
    while (tablet.getLocation() != null) {
      if (context.getTableState(tableId) != TableState.OFFLINE) {
        context.clearTableListCache();
        if (context.getTableState(tableId) != TableState.OFFLINE) {
          throw new AccumuloException("Table is online " + tableId
              + " cannot scan tablet in offline mode " + tablet.getExtent());
        }
      }
      sleepUninterruptibly(250, MILLISECONDS);
      tablet = getTabletFiles(nextRange);
    }
    if (!tablet.getExtent().tableId().equals(tableId)) {
      throw new AccumuloException(
          " did not find tablets for table " + tableId + " " + tablet.getExtent());
    }
    // Sanity check: the new tablet must directly follow the previous one (no gaps).
    if (currentExtent != null && !tablet.getExtent().isPreviousExtent(currentExtent)) {
      throw new AccumuloException(
          " " + currentExtent + " is not previous extent " + tablet.getExtent());
    }
    iter = createIterator(tablet.getExtent(), tablet.getFiles());
    iter.seek(range, LocalityGroupUtil.families(options.fetchedColumns),
        !options.fetchedColumns.isEmpty());
    currentExtent = tablet.getExtent();
  }

  // Reads the first tablet's metadata (files, location, prev row) in the given metadata range.
  private TabletMetadata getTabletFiles(Range nextRange) {
    try (TabletsMetadata tablets = TabletsMetadata.builder(context).scanMetadataTable()
        .overRange(nextRange).fetch(FILES, LOCATION, PREV_ROW).build()) {
      return tablets.iterator().next();
    }
  }

  /**
   * Builds the full client-side iterator stack over the given tablet's files: file readers
   * (with sampling if configured), system scan iterators (visibility filtering, column
   * selection), then the table's configured scan-scope iterators.
   */
  private SortedKeyValueIterator<Key,Value> createIterator(KeyExtent extent,
      Collection<StoredTabletFile> absFiles)
      throws TableNotFoundException, AccumuloException, IOException, AccumuloSecurityException {
    // possible race condition here, if table is renamed
    String tableName = context.getTableName(tableId);
    var tableConf = context.tableOperations().getConfiguration(tableName);
    AccumuloConfiguration tableCC = new ConfigurationCopy(tableConf);
    var systemConf = context.instanceOperations().getSystemConfiguration();
    Configuration conf = context.getHadoopConf();
    // Close readers left over from the previous tablet before opening new ones.
    for (SortedKeyValueIterator<Key,Value> reader : readers) {
      ((FileSKVIterator) reader).close();
    }
    readers.clear();
    SamplerConfiguration scannerSamplerConfig = options.getSamplerConfiguration();
    SamplerConfigurationImpl scannerSamplerConfigImpl =
        scannerSamplerConfig == null ? null : new SamplerConfigurationImpl(scannerSamplerConfig);
    SamplerConfigurationImpl samplerConfImpl = SamplerConfigurationImpl.newSamplerConfig(tableCC);
    // The scanner's requested sampler config must match the table's, or sampling is unusable.
    if (scannerSamplerConfigImpl != null && !scannerSamplerConfigImpl.equals(samplerConfImpl)) {
      throw new SampleNotPresentException();
    }
    for (StoredTabletFile file : absFiles) {
      var cs = CryptoFactoryLoader.getServiceForClientWithTable(systemConf, tableConf, tableId);
      FileSystem fs = VolumeConfiguration.fileSystemForPath(file.getNormalizedPathStr(), conf);
      FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
          .forFile(file, fs, conf, cs).withTableConfiguration(tableCC).build();
      if (scannerSamplerConfigImpl != null) {
        reader = reader.getSample(scannerSamplerConfigImpl);
        if (reader == null) {
          // File has no sample data for this config.
          throw new SampleNotPresentException();
        }
      }
      readers.add(reader);
    }
    MultiIterator multiIter = new MultiIterator(readers, extent);
    OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations, tableCC,
        false, samplerConfImpl == null ? null : samplerConfImpl.toSamplerConfiguration());
    byte[] defaultSecurityLabel;
    ColumnVisibility cv =
        new ColumnVisibility(tableCC.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
    defaultSecurityLabel = cv.getExpression();
    SortedKeyValueIterator<Key,Value> visFilter =
        SystemIteratorUtil.setupSystemScanIterators(multiIter,
            new HashSet<>(options.fetchedColumns), authorizations, defaultSecurityLabel, tableCC);
    var iteratorBuilderEnv = IteratorConfigUtil.loadIterConf(IteratorScope.scan,
        options.serverSideIteratorList, options.serverSideIteratorOptions, tableCC);
    var iteratorBuilder = iteratorBuilderEnv.env(iterEnv).build();
    return iterEnv
        .getTopLevelIterator(IteratorConfigUtil.loadIterators(visFilter, iteratorBuilder));
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }
}
| 9,838 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/Namespace.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Objects;
import org.apache.accumulo.core.data.NamespaceId;
/**
 * Immutable pairing of a namespace's name and its internal {@link NamespaceId}. Also holds the
 * two built-in namespaces (default and accumulo) and the name separator constant.
 */
public class Namespace {

  // The built-in default namespace (empty name).
  public static final Namespace DEFAULT = new Namespace("", NamespaceId.of("+default"));
  // The built-in system namespace for Accumulo's internal tables.
  public static final Namespace ACCUMULO = new Namespace("accumulo", NamespaceId.of("+accumulo"));
  // Separator between a namespace name and a table name in fully qualified table names.
  public static final String SEPARATOR = ".";

  private final String name;
  private final NamespaceId id;

  public Namespace(String name, NamespaceId id) {
    this.name = Objects.requireNonNull(name);
    this.id = Objects.requireNonNull(id);
  }

  public String name() {
    return name;
  }

  public NamespaceId id() {
    return id;
  }
}
| 9,839 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/Credentials.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.nio.ByteBuffer;
import java.util.Base64;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.securityImpl.thrift.TCredentials;
/**
* A wrapper for internal use. This class carries the instance, principal, and authentication token
* for use in the public API, in a non-serialized form. This is important, so that the
* authentication token carried in a {@link AccumuloClient} can be destroyed, invalidating future
* RPC operations from that {@link AccumuloClient}.
* <p>
* See ACCUMULO-1312
*
* @since 1.6.0
*/
public class Credentials {

  // Both fields are immutable references; the token object itself may still be destroyed.
  private final String principal;
  private final AuthenticationToken token;

  /**
   * Creates a new credentials object.
   *
   * @param principal unique identifier for the entity (e.g. a user or service) authorized for these
   *        credentials
   * @param token authentication token used to prove that the principal for these credentials has
   *        been properly verified
   */
  public Credentials(String principal, AuthenticationToken token) {
    this.principal = principal;
    this.token = token;
  }

  /**
   * Gets the principal.
   *
   * @return unique identifier for the entity (e.g. a user or service) authorized for these
   *         credentials
   */
  public String getPrincipal() {
    return principal;
  }

  /**
   * Gets the authentication token.
   *
   * @return authentication token used to prove that the principal for these credentials has been
   *         properly verified
   */
  public AuthenticationToken getToken() {
    return token;
  }

  /**
   * Converts the current object to the relevant thrift type. The object returned from this contains
   * a non-destroyable version of the {@link AuthenticationToken}, so this should be used just
   * before placing on the wire, and references to it should be tightly controlled.
   *
   * @param instanceID Accumulo instance ID
   * @return Thrift credentials
   * @throws IllegalStateException if the authentication token has been destroyed (expired)
   */
  public TCredentials toThrift(InstanceId instanceID) {
    TCredentials tCreds = new TCredentials(getPrincipal(), getToken().getClass().getName(),
        ByteBuffer.wrap(AuthenticationTokenSerializer.serialize(getToken())),
        instanceID.canonical());
    // Destroyed-token check is performed after serialization, preserving historic behavior.
    if (getToken().isDestroyed()) {
      throw new IllegalStateException("Token has been destroyed",
          new AccumuloSecurityException(getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED));
    }
    return tCreds;
  }

  /**
   * Converts a given thrift object to our internal Credentials representation.
   *
   * @param serialized a Thrift encoded set of credentials
   * @return a new Credentials instance; destroy the token when you're done.
   */
  public static Credentials fromThrift(TCredentials serialized) {
    return new Credentials(serialized.getPrincipal(), AuthenticationTokenSerializer
        .deserialize(serialized.getTokenClassName(), serialized.getToken()));
  }

  /**
   * Converts the current object to a serialized form. The object returned from this contains a
   * non-destroyable version of the {@link AuthenticationToken}, so references to it should be
   * tightly controlled.
   *
   * <p>
   * Format is three colon-separated base64 fields (principal, token class name, token bytes),
   * each replaced by "-" when absent.
   *
   * @return serialized form of these credentials
   */
  public final String serialize() {
    return (getPrincipal() == null ? "-"
        : Base64.getEncoder().encodeToString(getPrincipal().getBytes(UTF_8)))
        + ":"
        + (getToken() == null ? "-"
            : Base64.getEncoder().encodeToString(getToken().getClass().getName().getBytes(UTF_8)))
        + ":" + (getToken() == null ? "-" : Base64.getEncoder()
            .encodeToString(AuthenticationTokenSerializer.serialize(getToken())));
  }

  /**
   * Converts the serialized form to an instance of {@link Credentials}. The original serialized
   * form will not be affected.
   *
   * @param serializedForm serialized form of credentials
   * @return deserialized credentials
   */
  public static final Credentials deserialize(String serializedForm) {
    String[] split = serializedForm.split(":", 3);
    String principal =
        split[0].equals("-") ? null : new String(Base64.getDecoder().decode(split[0]), UTF_8);
    String tokenType =
        split[1].equals("-") ? null : new String(Base64.getDecoder().decode(split[1]), UTF_8);
    AuthenticationToken token = null;
    if (!split[2].equals("-")) {
      byte[] tokenBytes = Base64.getDecoder().decode(split[2]);
      token = AuthenticationTokenSerializer.deserialize(tokenType, tokenBytes);
    }
    return new Credentials(principal, token);
  }

  @Override
  public int hashCode() {
    // Hash on principal only; consistent with equals (equal objects share a principal).
    return getPrincipal() == null ? 0 : getPrincipal().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    // instanceof handles null, so no separate null check is needed.
    if (!(obj instanceof Credentials)) {
      return false;
    }
    Credentials other = (Credentials) obj;
    boolean pEq = getPrincipal() == null ? (other.getPrincipal() == null)
        : getPrincipal().equals(other.getPrincipal());
    if (!pEq) {
      return false;
    }
    return getToken() == null ? (other.getToken() == null) : getToken().equals(other.getToken());
  }

  @Override
  public String toString() {
    // Never expose token contents, only the token's class name.
    return getClass().getName() + ":" + getPrincipal() + ":"
        + (getToken() == null ? null : getToken().getClass().getName()) + ":<hidden>";
  }
}
| 9,840 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TabletServerBatchReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import java.lang.ref.Cleaner.Cleanable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.cleaner.CleanerUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * {@link BatchScanner} implementation that fans ranges out to tablet servers using a private
 * fixed-size thread pool. The pool is shut down on {@link #close()}, and a cleaner is
 * registered as a safety net in case the caller forgets to close.
 */
public class TabletServerBatchReader extends ScannerOptions implements BatchScanner {

  private static final Logger log = LoggerFactory.getLogger(TabletServerBatchReader.class);

  // Monotonic instance counter used only to give each reader's thread pool a unique name.
  private static final AtomicInteger nextBatchReaderInstance = new AtomicInteger(1);
  private final int batchReaderInstance = nextBatchReaderInstance.getAndIncrement();

  private final TableId tableId;
  private final String tableName;
  private final int numThreads;
  private final ThreadPoolExecutor queryThreadPool;
  private final ClientContext context;
  private final Authorizations authorizations;
  // Guards against double-close and against use after close.
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final Cleanable cleanable;
  // Ranges to scan; must be set via setRanges() before iterator() is called.
  private ArrayList<Range> ranges = null;

  public TabletServerBatchReader(ClientContext context, TableId tableId, String tableName,
      Authorizations authorizations, int numQueryThreads) {
    this(context, BatchScanner.class, tableId, tableName, authorizations, numQueryThreads);
  }

  protected TabletServerBatchReader(ClientContext context, Class<?> scopeClass, TableId tableId,
      String tableName, Authorizations authorizations, int numQueryThreads) {
    checkArgument(context != null, "context is null");
    checkArgument(tableId != null, "tableId is null");
    checkArgument(authorizations != null, "authorizations is null");
    this.context = context;
    this.authorizations = authorizations;
    this.tableId = tableId;
    this.tableName = tableName;
    this.numThreads = numQueryThreads;
    queryThreadPool = context.threadPools().createFixedThreadPool(numQueryThreads,
        "batch scanner " + batchReaderInstance + "-", false);
    // Call shutdown on this thread pool in case the caller does not call close().
    cleanable = CleanerUtil.shutdownThreadPoolExecutor(queryThreadPool, closed, log);
  }

  @Override
  public void close() {
    // compareAndSet makes close() idempotent; only the first call does any work.
    if (closed.compareAndSet(false, true)) {
      // Shutdown the pool
      queryThreadPool.shutdownNow();
      // deregister the cleaner, will not call shutdownNow() because closed is now true
      cleanable.clean();
    }
  }

  @Override
  public Authorizations getAuthorizations() {
    return authorizations;
  }

  @Override
  public void setRanges(Collection<Range> ranges) {
    if (ranges == null || ranges.isEmpty()) {
      throw new IllegalArgumentException("ranges must be non null and contain at least 1 range");
    }
    if (closed.get()) {
      throw new IllegalStateException("batch reader closed");
    }
    // Defensive copy so later caller mutations don't affect this scanner.
    this.ranges = new ArrayList<>(ranges);
  }

  @Override
  public Iterator<Entry<Key,Value>> iterator() {
    if (ranges == null) {
      throw new IllegalStateException("ranges not set");
    }
    if (closed.get()) {
      throw new IllegalStateException("batch reader closed");
    }
    return new TabletServerBatchReaderIterator(context, tableId, tableName, authorizations, ranges,
        numThreads, queryThreadPool, this, retryTimeout);
  }
}
| 9,841 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/BatchWriterImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TableId;
/**
 * {@link BatchWriter} bound to a single table; delegates all work to a
 * {@link TabletServerBatchWriter} with the table id pre-applied.
 */
public class BatchWriterImpl implements BatchWriter {

  private final TableId tableId;
  private final TabletServerBatchWriter bw;

  public BatchWriterImpl(ClientContext context, TableId tableId, BatchWriterConfig config) {
    checkArgument(context != null, "context is null");
    checkArgument(tableId != null, "tableId is null");
    // A null config means "use defaults".
    BatchWriterConfig effectiveConfig = (config == null) ? new BatchWriterConfig() : config;
    this.tableId = tableId;
    this.bw = new TabletServerBatchWriter(context, effectiveConfig);
  }

  @Override
  public void addMutation(Mutation m) throws MutationsRejectedException {
    checkArgument(m != null, "m is null");
    bw.addMutation(tableId, m);
  }

  @Override
  public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException {
    checkArgument(iterable != null, "iterable is null");
    bw.addMutation(tableId, iterable.iterator());
  }

  @Override
  public void close() throws MutationsRejectedException {
    bw.close();
  }

  @Override
  public void flush() throws MutationsRejectedException {
    bw.flush();
  }
}
| 9,842 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TimeoutTabletLocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TimedOutException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;
/**
* Throws a {@link TimedOutException} if the specified timeout duration elapses between two failed
* TabletLocator calls.
* <p>
* This class is safe to cache locally.
*/
/**
 * Throws a {@link TimedOutException} if the specified timeout duration elapses between two failed
 * TabletLocator calls.
 * <p>
 * This class is safe to cache locally.
 */
public class TimeoutTabletLocator extends SyncingTabletLocator {

  // Timeout in milliseconds; made final since it is never reassigned after construction.
  private final long timeout;
  // Wall-clock time (millis) of the first failure in the current failure streak, or null if
  // the last call succeeded.
  private Long firstFailTime = null;

  // Records a failure; throws TimedOutException once failures have persisted past the timeout.
  private void failed() {
    if (firstFailTime == null) {
      firstFailTime = System.currentTimeMillis();
    } else if (System.currentTimeMillis() - firstFailTime > timeout) {
      throw new TimedOutException("Failed to obtain metadata");
    }
  }

  // Resets the failure streak after any successful call.
  private void succeeded() {
    firstFailTime = null;
  }

  public TimeoutTabletLocator(long timeout, final ClientContext context, final TableId table) {
    super(context, table);
    this.timeout = timeout;
  }

  @Override
  public TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow,
      boolean retry) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    try {
      TabletLocation ret = super.locateTablet(context, row, skipRow, retry);
      // A null location counts as a failure for timeout purposes.
      if (ret == null) {
        failed();
      } else {
        succeeded();
      }
      return ret;
    } catch (AccumuloException ae) {
      failed();
      throw ae;
    }
  }

  @Override
  public <T extends Mutation> void binMutations(ClientContext context, List<T> mutations,
      Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    try {
      super.binMutations(context, mutations, binnedMutations, failures);
      // Only a total failure (every mutation unbinned) counts against the timeout.
      if (failures.size() == mutations.size()) {
        failed();
      } else {
        succeeded();
      }
    } catch (AccumuloException ae) {
      failed();
      throw ae;
    }
  }

  @Override
  public List<Range> binRanges(ClientContext context, List<Range> ranges,
      Map<String,Map<KeyExtent,List<Range>>> binnedRanges)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    try {
      List<Range> ret = super.binRanges(context, ranges, binnedRanges);
      // binRanges returns the ranges it could NOT bin; all returned means total failure.
      if (ranges.size() == ret.size()) {
        failed();
      } else {
        succeeded();
      }
      return ret;
    } catch (AccumuloException ae) {
      failed();
      throw ae;
    }
  }
}
| 9,843 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/DurabilityImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Locale;

import org.apache.accumulo.core.client.Durability;
import org.apache.accumulo.core.tabletingest.thrift.TDurability;
/**
 * Conversions between the public {@link Durability} enum and the thrift {@link TDurability},
 * plus helpers for parsing a durability name and resolving the effective durability.
 */
public class DurabilityImpl {

  /** Converts a public durability to its thrift form; unmatched values map to NONE. */
  public static TDurability toThrift(Durability durability) {
    switch (durability) {
      case DEFAULT:
        return TDurability.DEFAULT;
      case SYNC:
        return TDurability.SYNC;
      case FLUSH:
        return TDurability.FLUSH;
      case LOG:
        return TDurability.LOG;
      default:
        return TDurability.NONE;
    }
  }

  /**
   * Parses a durability from its case-insensitive enum name.
   * <p>
   * Uses {@link Locale#ROOT} so the upper-casing is independent of the JVM's default locale,
   * which can otherwise alter case mappings (e.g. under Turkish locales).
   */
  public static Durability fromString(String value) {
    return Durability.valueOf(value.toUpperCase(Locale.ROOT));
  }

  /** Converts a thrift durability to the public enum; a null input maps to DEFAULT. */
  public static Durability fromThrift(TDurability tdurabilty) {
    if (tdurabilty == null) {
      return Durability.DEFAULT;
    }
    switch (tdurabilty) {
      case DEFAULT:
        return Durability.DEFAULT;
      case SYNC:
        return Durability.SYNC;
      case FLUSH:
        return Durability.FLUSH;
      case LOG:
        return Durability.LOG;
      default:
        return Durability.NONE;
    }
  }

  /**
   * Resolves the effective durability: DEFAULT defers to the tablet's configured durability,
   * anything else wins. (Method name keeps a historical misspelling for API compatibility.)
   */
  public static Durability resolveDurabilty(Durability durability, Durability tabletDurability) {
    if (durability == Durability.DEFAULT) {
      return tabletDurability;
    }
    return durability;
  }
}
| 9,844 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/IsolationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
/**
 * Unchecked exception used by the client implementation; may optionally wrap the underlying
 * cause.
 */
public class IsolationException extends RuntimeException {

  private static final long serialVersionUID = 1L;

  /** Creates an exception with no message and no cause. */
  public IsolationException() {}

  /**
   * Creates an exception wrapping the given cause.
   *
   * @since 2.0.0
   */
  public IsolationException(Exception cause) {
    super(cause);
  }
}
| 9,845 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineScanner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;
/**
 * {@link Scanner} implementation whose iteration is performed by {@code OfflineIterator} rather
 * than by a live scan session. Isolation controls are no-ops and readahead is unsupported.
 */
public class OfflineScanner extends ScannerOptions implements Scanner {

  // batchSize and range are mutable via the Scanner interface setters.
  private int batchSize;
  private Range range;
  // These never change after construction, so they are final.
  private final ClientContext context;
  private final Authorizations authorizations;
  private final Text tableId;

  /**
   * @param context client context used by the iterator; must be non-null
   * @param tableId id of the table to scan; must be non-null
   * @param authorizations scan authorizations; must be non-null
   */
  public OfflineScanner(ClientContext context, TableId tableId, Authorizations authorizations) {
    checkArgument(context != null, "context is null");
    checkArgument(tableId != null, "tableId is null");
    checkArgument(authorizations != null, "authorizations is null");
    this.context = context;
    this.tableId = new Text(tableId.canonical());
    // Default to scanning the whole table until setRange is called.
    this.range = new Range((Key) null, (Key) null);
    this.authorizations = authorizations;
    this.batchSize = Constants.SCAN_BATCH_SIZE;
  }

  @Override
  public void setRange(Range range) {
    this.range = range;
  }

  @Override
  public Range getRange() {
    return range;
  }

  @Override
  public void setBatchSize(int size) {
    this.batchSize = size;
  }

  @Override
  public int getBatchSize() {
    return batchSize;
  }

  @Override
  public void enableIsolation() {
    // Intentionally a no-op for this scanner implementation.
  }

  @Override
  public void disableIsolation() {
    // Intentionally a no-op for this scanner implementation.
  }

  @Override
  public Iterator<Entry<Key,Value>> iterator() {
    // Each call produces a fresh iterator over the current range.
    return new OfflineIterator(this, context, authorizations, tableId, range);
  }

  @Override
  public Authorizations getAuthorizations() {
    return authorizations;
  }

  @Override
  public long getReadaheadThreshold() {
    throw new UnsupportedOperationException();
  }

  @Override
  public void setReadaheadThreshold(long batches) {
    throw new UnsupportedOperationException();
  }
}
| 9,846 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/DelegationTokenImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.Objects.requireNonNull;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import org.apache.accumulo.core.client.security.tokens.DelegationToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.securityImpl.thrift.TAuthenticationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client-side implementation of {@link DelegationToken}: the token password (stored via the
 * inherited {@link PasswordToken}) together with an {@link AuthenticationTokenIdentifier}
 * describing the token. Writable: the password and identifier serialize together.
 */
public class DelegationTokenImpl extends PasswordToken implements DelegationToken {

  private static final Logger log = LoggerFactory.getLogger(DelegationTokenImpl.class);

  // Prefix of the Hadoop Token service name; the instance id is appended after a '-'.
  public static final String SERVICE_NAME = "AccumuloDelegationToken";

  private final AuthenticationTokenIdentifier identifier;

  /** No-arg constructor for Writable deserialization; starts with an empty identifier. */
  public DelegationTokenImpl() {
    this.identifier = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier());
  }

  /**
   * @param delegationTokenPassword the raw token password bytes
   * @param identifier the identifier describing this token
   */
  public DelegationTokenImpl(byte[] delegationTokenPassword,
      AuthenticationTokenIdentifier identifier) {
    requireNonNull(delegationTokenPassword);
    requireNonNull(identifier);
    setPassword(delegationTokenPassword);
    this.identifier = identifier;
  }

  /**
   * Builds a token from the credentials held by the given {@link UserGroupInformation}, looking
   * up the Hadoop token registered under {@code SERVICE_NAME + "-" + instanceID}.
   *
   * @throws IllegalArgumentException if no Accumulo delegation token is in the user's credentials
   */
  public DelegationTokenImpl(String instanceID, UserGroupInformation user,
      AuthenticationTokenIdentifier identifier) {
    requireNonNull(instanceID);
    requireNonNull(user);
    requireNonNull(identifier);
    Credentials creds = user.getCredentials();
    Token<? extends TokenIdentifier> token =
        creds.getToken(new Text(SERVICE_NAME + "-" + instanceID));
    if (token == null) {
      throw new IllegalArgumentException(
          "Did not find Accumulo delegation token in provided UserGroupInformation");
    }
    setPasswordFromToken(token);
    this.identifier = identifier;
  }

  /**
   * Builds a token directly from a Hadoop {@link Token}; the token's kind must match
   * {@code AuthenticationTokenIdentifier.TOKEN_KIND}.
   */
  public DelegationTokenImpl(Token<? extends TokenIdentifier> token,
      AuthenticationTokenIdentifier identifier) {
    requireNonNull(token);
    requireNonNull(identifier);
    setPasswordFromToken(token);
    this.identifier = identifier;
  }

  // Copies the password out of a Hadoop Token, rejecting tokens of the wrong kind.
  private void setPasswordFromToken(Token<? extends TokenIdentifier> token) {
    if (!AuthenticationTokenIdentifier.TOKEN_KIND.equals(token.getKind())) {
      String msg = "Expected an AuthenticationTokenIdentifier but got a " + token.getKind();
      log.error(msg);
      throw new IllegalArgumentException(msg);
    }
    setPassword(token.getPassword());
  }

  /**
   * The identifier for this token, may be null.
   */
  public AuthenticationTokenIdentifier getIdentifier() {
    return identifier;
  }

  /**
   * The service name used to identify the {@link Token}
   */
  public Text getServiceName() {
    requireNonNull(identifier);
    return new Text(SERVICE_NAME + "-" + identifier.getInstanceId());
  }

  @Override
  public void init(Properties properties) {
    // Encourage use of UserGroupInformation as entry point
  }

  @Override
  public Set<TokenProperty> getProperties() {
    // Encourage use of UserGroupInformation as entry point
    return Collections.emptySet();
  }

  // Serializes the password (superclass) followed by the identifier.
  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    identifier.write(out);
  }

  // Deserializes in the same order as write(): password first, then identifier.
  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    identifier.readFields(in);
  }

  @Override
  public DelegationTokenImpl clone() {
    var clone = super.clone();
    return new DelegationTokenImpl(clone.getPassword(),
        new AuthenticationTokenIdentifier(identifier.getThriftIdentifier()));
  }

  @Override
  public int hashCode() {
    return super.hashCode() ^ identifier.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    // We assume we can cast obj to DelegationToken because the super.equals(obj) check ensures obj
    // is of the same type as this
    return super.equals(obj) && identifier.equals(((DelegationTokenImpl) obj).identifier);
  }
}
| 9,847 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ThriftTransportKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.Objects.requireNonNull;
import java.util.Objects;
import org.apache.accumulo.core.rpc.SaslConnectionParams;
import org.apache.accumulo.core.rpc.SslConnectionParams;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.net.HostAndPort;
/**
 * Immutable cache key identifying a pooled thrift transport: server address, timeout, and the
 * optional SSL or SASL connection parameters. At most one of SSL and SASL may be set.
 */
@VisibleForTesting
public class ThriftTransportKey {
  private final HostAndPort server;
  private final long timeout;
  private final SslConnectionParams sslParams;
  private final SaslConnectionParams saslParams;
  // Precomputed because instances are immutable and heavily used as hash keys.
  private final int hash;

  @VisibleForTesting
  public ThriftTransportKey(HostAndPort server, long timeout, ClientContext context) {
    this(server, timeout, context.getClientSslParams(), context.getSaslParams());
  }

  /**
   * Visible only for testing
   */
  ThriftTransportKey(HostAndPort server, long timeout, SslConnectionParams sslParams,
      SaslConnectionParams saslParams) {
    requireNonNull(server, "location is null");
    this.server = server;
    this.timeout = timeout;
    this.sslParams = sslParams;
    this.saslParams = saslParams;
    if (saslParams != null && sslParams != null) {
      // TSasl and TSSL transport factories don't play nicely together
      throw new IllegalArgumentException("Cannot use both SSL and SASL thrift transports");
    }
    this.hash = Objects.hash(server, timeout, sslParams, saslParams);
  }

  HostAndPort getServer() {
    return server;
  }

  long getTimeout() {
    return timeout;
  }

  public boolean isSsl() {
    return sslParams != null;
  }

  public boolean isSasl() {
    return saslParams != null;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof ThriftTransportKey)) {
      return false;
    }
    ThriftTransportKey ttk = (ThriftTransportKey) o;
    // Compare ssl/sasl params null-safely and symmetrically. The previous formulation skipped
    // the comparison whenever *this* key had no params, which made equals() asymmetric and
    // inconsistent with hashCode() (which always folds in both params).
    return server.equals(ttk.server) && timeout == ttk.timeout
        && Objects.equals(sslParams, ttk.sslParams)
        && Objects.equals(saslParams, ttk.saslParams);
  }

  @Override
  public int hashCode() {
    return hash;
  }

  @Override
  public String toString() {
    String prefix = "";
    if (isSsl()) {
      prefix = "ssl:";
    } else if (isSasl()) {
      prefix = saslParams + ":";
    }
    return prefix + server + " (" + timeout + ")";
  }

  public SslConnectionParams getSslParams() {
    return sslParams;
  }

  public SaslConnectionParams getSaslParams() {
    return saslParams;
  }
}
| 9,848 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import static java.util.stream.Collectors.toSet;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOCATION;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME;
import static org.apache.accumulo.core.util.Validators.NEW_TABLE_NAME;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.NamespaceExistsException;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TableOfflineException;
import org.apache.accumulo.core.client.admin.CloneConfiguration;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.DiskUsage;
import org.apache.accumulo.core.client.admin.FindMax;
import org.apache.accumulo.core.client.admin.ImportConfiguration;
import org.apache.accumulo.core.client.admin.Locations;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.client.admin.SummaryRetriever;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.client.admin.TimeType;
import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
import org.apache.accumulo.core.client.admin.compaction.CompactionSelector;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
import org.apache.accumulo.core.client.summary.Summary;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletLocation;
import org.apache.accumulo.core.clientImpl.bulk.BulkImport;
import org.apache.accumulo.core.clientImpl.thrift.ClientService.Client;
import org.apache.accumulo.core.clientImpl.thrift.TDiskUsage;
import org.apache.accumulo.core.clientImpl.thrift.TVersionedProperties;
import org.apache.accumulo.core.clientImpl.thrift.ThriftNotActiveServiceException;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.data.constraints.Constraint;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.dataImpl.thrift.TRowRange;
import org.apache.accumulo.core.dataImpl.thrift.TSummaries;
import org.apache.accumulo.core.dataImpl.thrift.TSummarizerConfiguration;
import org.apache.accumulo.core.dataImpl.thrift.TSummaryRequest;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.manager.thrift.FateOperation;
import org.apache.accumulo.core.manager.thrift.FateService;
import org.apache.accumulo.core.manager.thrift.ManagerClientService;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.TabletDeletedException;
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.Location;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.summary.SummarizerConfigurationUtil;
import org.apache.accumulo.core.summary.SummaryCollection;
import org.apache.accumulo.core.tablet.thrift.TabletManagementClientService;
import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
import org.apache.accumulo.core.util.MapCounter;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.Retry;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.accumulo.core.volume.VolumeConfiguration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.net.HostAndPort;
public class TableOperationsImpl extends TableOperationsHelper {
public static final String PROPERTY_EXCLUDE_PREFIX = "!";
public static final String COMPACTION_CANCELED_MSG = "Compaction canceled";
public static final String TABLE_DELETED_MSG = "Table is being deleted";
private static final Logger log = LoggerFactory.getLogger(TableOperations.class);
private final ClientContext context;
/**
 * @param context the client context used for all table operations; must be non-null
 */
public TableOperationsImpl(ClientContext context) {
  checkArgument(context != null, "context is null");
  this.context = context;
}
// Returns the names of all tables known to this client, in sorted order.
@Override
public SortedSet<String> list() {
  OpTimer timer = null;
  if (log.isTraceEnabled()) {
    log.trace("tid={} Fetching list of tables...", Thread.currentThread().getId());
    timer = new OpTimer().start();
  }
  // Copy the key set so the returned set is independent of the cached name->id map.
  TreeSet<String> tableNames = new TreeSet<>(context.getTableNameToIdMap().keySet());
  if (timer != null) {
    timer.stop();
    log.trace("tid={} Fetched {} table names in {}", Thread.currentThread().getId(),
        tableNames.size(), String.format("%.3f secs", timer.scale(SECONDS)));
  }
  return tableNames;
}
// Reports whether the named table exists, consulting the cached name->id map.
@Override
public boolean exists(String tableName) {
  EXISTING_TABLE_NAME.validate(tableName);
  // The metadata and root tables always exist; answer without a lookup.
  if (tableName.equals(MetadataTable.NAME) || tableName.equals(RootTable.NAME)) {
    return true;
  }
  OpTimer timer = null;
  if (log.isTraceEnabled()) {
    log.trace("tid={} Checking if table {} exists...", Thread.currentThread().getId(), tableName);
    timer = new OpTimer().start();
  }
  boolean exists = context.getTableNameToIdMap().containsKey(tableName);
  if (timer != null) {
    timer.stop();
    log.trace("tid={} Checked existence of {} in {}", Thread.currentThread().getId(), exists,
        String.format("%.3f secs", timer.scale(SECONDS)));
  }
  return exists;
}
// Creates a table with an all-defaults configuration; delegates to the full overload.
@Override
public void create(String tableName)
    throws AccumuloException, AccumuloSecurityException, TableExistsException {
  create(tableName, new NewTableConfiguration());
}
// Creates a table via the TABLE_CREATE FATE operation, marshalling the configuration (time
// type, initial state, split points, properties) into the byte-buffer argument list the
// manager expects.
@Override
public void create(String tableName, NewTableConfiguration ntc)
    throws AccumuloException, AccumuloSecurityException, TableExistsException {
  NEW_TABLE_NAME.validate(tableName);
  checkArgument(ntc != null, "ntc is null");
  List<ByteBuffer> args = new ArrayList<>();
  args.add(ByteBuffer.wrap(tableName.getBytes(UTF_8)));
  args.add(ByteBuffer.wrap(ntc.getTimeType().name().getBytes(UTF_8)));
  // Send info relating to initial table creation i.e, create online or offline
  args.add(ByteBuffer.wrap(ntc.getInitialTableState().name().getBytes(UTF_8)));
  // Check for possible initial splits to be added at table creation
  // Always send number of initial splits to be created, even if zero. If greater than zero,
  // add the splits to the argument List which will be used by the FATE operations.
  int numSplits = ntc.getSplits().size();
  args.add(ByteBuffer.wrap(String.valueOf(numSplits).getBytes(UTF_8)));
  if (numSplits > 0) {
    for (Text t : ntc.getSplits()) {
      args.add(TextUtil.getByteBuffer(t));
    }
  }
  Map<String,String> opts = ntc.getProperties();
  try {
    doTableFateOperation(tableName, AccumuloException.class, FateOperation.TABLE_CREATE, args,
        opts);
  } catch (TableNotFoundException e) {
    // should not happen
    throw new AssertionError(e);
  }
}
/**
 * Starts a new FATE transaction on the manager, retrying indefinitely on transient transport
 * failures or when a no-longer-active manager was contacted.
 *
 * @return the id of the newly allocated FATE operation
 */
private long beginFateOperation() throws ThriftSecurityException, TException {
  while (true) {
    FateService.Client client = null;
    try {
      client = ThriftClientTypes.FATE.getConnectionWithRetry(context);
      return client.beginFateOperation(TraceUtil.traceInfo(), context.rpcCreds());
    } catch (TTransportException tte) {
      log.debug("Failed to call beginFateOperation(), retrying ... ", tte);
      sleepUninterruptibly(100, MILLISECONDS);
    } catch (ThriftNotActiveServiceException e) {
      // Let it loop, fetching a new location
      log.debug("Contacted a Manager which is no longer active, retrying");
      sleepUninterruptibly(100, MILLISECONDS);
    } finally {
      // Always release the thrift connection, even when retrying.
      ThriftUtil.close(client, context);
    }
  }
}
// This method is for retrying in the case of network failures;
// anything else it passes to the caller to deal with
/**
 * Submits the previously-begun FATE operation for execution, retrying indefinitely on
 * transient transport failures or when a no-longer-active manager was contacted.
 *
 * @param opid id returned by {@code beginFateOperation()}
 * @param autoCleanUp when true the manager cleans up the operation without the client waiting
 */
private void executeFateOperation(long opid, FateOperation op, List<ByteBuffer> args,
    Map<String,String> opts, boolean autoCleanUp)
    throws ThriftSecurityException, TException, ThriftTableOperationException {
  while (true) {
    FateService.Client client = null;
    try {
      client = ThriftClientTypes.FATE.getConnectionWithRetry(context);
      client.executeFateOperation(TraceUtil.traceInfo(), context.rpcCreds(), opid, op, args, opts,
          autoCleanUp);
      return;
    } catch (TTransportException tte) {
      log.debug("Failed to call executeFateOperation(), retrying ... ", tte);
      sleepUninterruptibly(100, MILLISECONDS);
    } catch (ThriftNotActiveServiceException e) {
      // Let it loop, fetching a new location
      log.debug("Contacted a Manager which is no longer active, retrying");
      sleepUninterruptibly(100, MILLISECONDS);
    } finally {
      ThriftUtil.close(client, context);
    }
  }
}
/**
 * Blocks until the FATE operation completes, retrying indefinitely on transient transport
 * failures or when a no-longer-active manager was contacted.
 *
 * @return the operation's string return value
 */
private String waitForFateOperation(long opid)
    throws ThriftSecurityException, TException, ThriftTableOperationException {
  while (true) {
    FateService.Client client = null;
    try {
      client = ThriftClientTypes.FATE.getConnectionWithRetry(context);
      return client.waitForFateOperation(TraceUtil.traceInfo(), context.rpcCreds(), opid);
    } catch (TTransportException tte) {
      log.debug("Failed to call waitForFateOperation(), retrying ... ", tte);
      sleepUninterruptibly(100, MILLISECONDS);
    } catch (ThriftNotActiveServiceException e) {
      // Let it loop, fetching a new location
      log.debug("Contacted a Manager which is no longer active, retrying");
      sleepUninterruptibly(100, MILLISECONDS);
    } finally {
      ThriftUtil.close(client, context);
    }
  }
}
/**
 * Tells the manager the client is done with the FATE operation so its state can be cleaned up,
 * retrying indefinitely on transient transport failures or inactive managers.
 */
private void finishFateOperation(long opid) throws ThriftSecurityException, TException {
  while (true) {
    FateService.Client client = null;
    try {
      client = ThriftClientTypes.FATE.getConnectionWithRetry(context);
      client.finishFateOperation(TraceUtil.traceInfo(), context.rpcCreds(), opid);
      break;
    } catch (TTransportException tte) {
      log.debug("Failed to call finishFateOperation(), retrying ... ", tte);
      sleepUninterruptibly(100, MILLISECONDS);
    } catch (ThriftNotActiveServiceException e) {
      // Let it loop, fetching a new location
      log.debug("Contacted a Manager which is no longer active, retrying");
      sleepUninterruptibly(100, MILLISECONDS);
    } finally {
      ThriftUtil.close(client, context);
    }
  }
}
/**
 * Runs the TABLE_BULK_IMPORT2 FATE operation for the given table.
 *
 * @return the FATE operation's return value
 * @throws TableNotFoundException also raised when the table's namespace does not exist
 */
public String doBulkFateOperation(List<ByteBuffer> args, String tableName)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  try {
    return doFateOperation(FateOperation.TABLE_BULK_IMPORT2, args, Collections.emptyMap(),
        tableName);
  } catch (TableExistsException | NamespaceExistsException e) {
    // should not happen
    throw new AssertionError(e);
  } catch (NamespaceNotFoundException ne) {
    throw new TableNotFoundException(null, tableName, "Namespace not found", ne);
  }
}
// Convenience overload that always waits for the FATE operation to complete.
String doFateOperation(FateOperation op, List<ByteBuffer> args, Map<String,String> opts,
    String tableOrNamespaceName)
    throws AccumuloSecurityException, TableExistsException, TableNotFoundException,
    AccumuloException, NamespaceExistsException, NamespaceNotFoundException {
  return doFateOperation(op, args, opts, tableOrNamespaceName, true);
}
/**
 * Runs a FATE operation end to end: begins the transaction, executes it, optionally waits for
 * completion, and always finishes the transaction and clears the table-list cache. Thrift-level
 * failures are translated into the matching public exception types.
 *
 * @param wait when false the operation is submitted with auto-cleanup and this method returns
 *        null immediately without waiting for completion
 * @return the operation's return value, or null when {@code wait} is false
 */
String doFateOperation(FateOperation op, List<ByteBuffer> args, Map<String,String> opts,
    String tableOrNamespaceName, boolean wait)
    throws AccumuloSecurityException, TableExistsException, TableNotFoundException,
    AccumuloException, NamespaceExistsException, NamespaceNotFoundException {
  Long opid = null;
  try {
    opid = beginFateOperation();
    executeFateOperation(opid, op, args, opts, !wait);
    if (!wait) {
      // Auto-cleanup was requested; null opid so the finally block does not finish it.
      opid = null;
      return null;
    }
    return waitForFateOperation(opid);
  } catch (ThriftSecurityException e) {
    switch (e.getCode()) {
      case TABLE_DOESNT_EXIST:
        throw new TableNotFoundException(null, tableOrNamespaceName,
            "Target table does not exist");
      case NAMESPACE_DOESNT_EXIST:
        throw new NamespaceNotFoundException(null, tableOrNamespaceName,
            "Target namespace does not exist");
      default:
        String tableInfo = context.getPrintableTableInfoFromName(tableOrNamespaceName);
        throw new AccumuloSecurityException(e.user, e.code, tableInfo, e);
    }
  } catch (ThriftTableOperationException e) {
    // Map the thrift operation-failure type onto the public exception hierarchy.
    switch (e.getType()) {
      case EXISTS:
        throw new TableExistsException(e);
      case NOTFOUND:
        throw new TableNotFoundException(e);
      case NAMESPACE_EXISTS:
        throw new NamespaceExistsException(e);
      case NAMESPACE_NOTFOUND:
        throw new NamespaceNotFoundException(e);
      case OFFLINE:
        throw new TableOfflineException(
            e.getTableId() == null ? null : TableId.of(e.getTableId()), tableOrNamespaceName);
      case BULK_CONCURRENT_MERGE:
        throw new AccumuloBulkMergeException(e);
      default:
        throw new AccumuloException(e.description, e);
    }
  } catch (Exception e) {
    throw new AccumuloException(e.getMessage(), e);
  } finally {
    context.clearTableListCache();
    // always finish table op, even when exception
    if (opid != null) {
      try {
        finishFateOperation(opid);
      } catch (Exception e) {
        log.warn("Exception thrown while finishing fate table operation", e);
      }
    }
  }
}
  /**
   * Immutable context shared by all {@code SplitTask}s spawned for a single addSplits call.
   * Identifies the table being split and carries the coordination primitives (executor, completion
   * latch, and first-failure holder) used by the parallel tasks.
   */
  private static class SplitEnv {
    private final String tableName;
    private final TableId tableId;
    private final ExecutorService executor;
    // counted down once per split point successfully added
    private final CountDownLatch latch;
    // holds the first exception seen by any task; later tasks check it and abort early
    private final AtomicReference<Exception> exception;

    SplitEnv(String tableName, TableId tableId, ExecutorService executor, CountDownLatch latch,
        AtomicReference<Exception> exception) {
      this.tableName = tableName;
      this.tableId = tableId;
      this.executor = executor;
      this.latch = latch;
      this.exception = exception;
    }
  }
  /**
   * Divide-and-conquer task that adds split points in parallel. Each task first adds the midpoint
   * of its assigned (sorted) split list, which guarantees the two halves fall in different
   * tablets, then schedules each half as a child task on the shared executor.
   */
  private class SplitTask implements Runnable {
    private List<Text> splits;
    private SplitEnv env;

    SplitTask(SplitEnv env, List<Text> splits) {
      this.env = env;
      this.splits = splits;
    }

    @Override
    public void run() {
      try {
        // another task already failed; stop doing work
        if (env.exception.get() != null) {
          return;
        }

        // base case: small enough to add directly
        if (splits.size() <= 2) {
          addSplits(env, new TreeSet<>(splits));
          splits.forEach(s -> env.latch.countDown());
          return;
        }

        int mid = splits.size() / 2;

        // split the middle split point to ensure that child task split
        // different tablets and can therefore run in parallel
        addSplits(env, new TreeSet<>(splits.subList(mid, mid + 1)));
        env.latch.countDown();

        env.executor.execute(new SplitTask(env, splits.subList(0, mid)));
        env.executor.execute(new SplitTask(env, splits.subList(mid + 1, splits.size())));

      } catch (Exception t) {
        // record only the first failure; the driving thread inspects and rethrows it
        env.exception.compareAndSet(null, t);
      }
    }
  }
@Override
public void addSplits(String tableName, SortedSet<Text> partitionKeys)
throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
EXISTING_TABLE_NAME.validate(tableName);
TableId tableId = context.getTableId(tableName);
List<Text> splits = new ArrayList<>(partitionKeys);
// should be sorted because we copied from a sorted set, but that makes
// assumptions about how the copy was done so resort to be sure.
Collections.sort(splits);
CountDownLatch latch = new CountDownLatch(splits.size());
AtomicReference<Exception> exception = new AtomicReference<>(null);
ExecutorService executor = context.threadPools().createFixedThreadPool(16, "addSplits", false);
try {
executor.execute(
new SplitTask(new SplitEnv(tableName, tableId, executor, latch, exception), splits));
while (!latch.await(100, MILLISECONDS)) {
if (exception.get() != null) {
executor.shutdownNow();
Throwable excep = exception.get();
// Below all exceptions are wrapped and rethrown. This is done so that the user knows what
// code path got them here. If the wrapping was not done, the
// user would only have the stack trace for the background thread.
if (excep instanceof TableNotFoundException) {
TableNotFoundException tnfe = (TableNotFoundException) excep;
throw new TableNotFoundException(tableId.canonical(), tableName,
"Table not found by background thread", tnfe);
} else if (excep instanceof TableOfflineException) {
log.debug("TableOfflineException occurred in background thread. Throwing new exception",
excep);
throw new TableOfflineException(tableId, tableName);
} else if (excep instanceof AccumuloSecurityException) {
// base == background accumulo security exception
AccumuloSecurityException base = (AccumuloSecurityException) excep;
throw new AccumuloSecurityException(base.getUser(), base.asThriftException().getCode(),
base.getTableInfo(), excep);
} else if (excep instanceof AccumuloServerException) {
throw new AccumuloServerException((AccumuloServerException) excep);
} else if (excep instanceof Error) {
throw new Error(excep);
} else {
throw new AccumuloException(excep);
}
}
}
} catch (InterruptedException e) {
throw new IllegalStateException(e);
} finally {
executor.shutdown();
}
}
  /**
   * Adds each split point by RPC to the tablet server currently hosting the containing tablet,
   * retrying each split until it succeeds. Location lookups that return null or fail with
   * NotServingTabletException cause a cache invalidation and retry; security failures and
   * application-level server errors are rethrown.
   */
  private void addSplits(SplitEnv env, SortedSet<Text> partitionKeys) throws AccumuloException,
      AccumuloSecurityException, TableNotFoundException, AccumuloServerException {
    TabletLocator tabLocator = TabletLocator.getLocator(context, env.tableId);
    for (Text split : partitionKeys) {
      boolean successful = false;
      int attempt = 0;
      long locationFailures = 0;

      while (!successful) {

        if (attempt > 0) {
          // back off briefly between retries
          sleepUninterruptibly(100, MILLISECONDS);
        }

        attempt++;

        TabletLocation tl = tabLocator.locateTablet(context, split, false, false);

        if (tl == null) {
          // no location available: fail fast if the table was deleted or taken offline,
          // otherwise retry the lookup
          context.requireTableExists(env.tableId, env.tableName);
          context.requireNotOffline(env.tableId, env.tableName);
          continue;
        }

        HostAndPort address = HostAndPort.fromString(tl.getTserverLocation());

        try {
          TabletManagementClientService.Client client =
              ThriftUtil.getClient(ThriftClientTypes.TABLET_MGMT, address, context);
          try {

            OpTimer timer = null;

            if (log.isTraceEnabled()) {
              log.trace("tid={} Splitting tablet {} on {} at {}", Thread.currentThread().getId(),
                  tl.getExtent(), address, split);
              timer = new OpTimer().start();
            }

            client.splitTablet(TraceUtil.traceInfo(), context.rpcCreds(), tl.getExtent().toThrift(),
                TextUtil.getByteBuffer(split));

            // just split it, might as well invalidate it in the cache
            tabLocator.invalidateCache(tl.getExtent());

            if (timer != null) {
              timer.stop();
              log.trace("Split tablet in {}", String.format("%.3f secs", timer.scale(SECONDS)));
            }

          } finally {
            ThriftUtil.returnClient(client, context);
          }

        } catch (TApplicationException tae) {
          // server-side application error; not retryable
          throw new AccumuloServerException(address.toString(), tae);
        } catch (ThriftSecurityException e) {
          // the table may have been deleted out from under us; check before reporting security
          context.clearTableListCache();
          context.requireTableExists(env.tableId, env.tableName);
          throw new AccumuloSecurityException(e.user, e.code, e);
        } catch (NotServingTabletException e) {
          // Do not silently spin when we repeatedly fail to get the location for a tablet
          locationFailures++;
          if (locationFailures == 5 || locationFailures % 50 == 0) {
            log.warn("Having difficulty locating hosting tabletserver for split {} on table {}."
                + " Seen {} failures.", split, env.tableName, locationFailures);
          }

          tabLocator.invalidateCache(tl.getExtent());
          continue;
        } catch (TException e) {
          // transport-level failure; drop the cached server location and retry
          tabLocator.invalidateCache(context, tl.getTserverLocation());
          continue;
        }

        successful = true;
      }
    }
  }
@Override
public void merge(String tableName, Text start, Text end)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
ByteBuffer EMPTY = ByteBuffer.allocate(0);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
start == null ? EMPTY : TextUtil.getByteBuffer(start),
end == null ? EMPTY : TextUtil.getByteBuffer(end));
Map<String,String> opts = new HashMap<>();
try {
doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_MERGE, args,
opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
}
@Override
public void deleteRows(String tableName, Text start, Text end)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
ByteBuffer EMPTY = ByteBuffer.allocate(0);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
start == null ? EMPTY : TextUtil.getByteBuffer(start),
end == null ? EMPTY : TextUtil.getByteBuffer(end));
Map<String,String> opts = new HashMap<>();
try {
doTableFateOperation(tableName, TableNotFoundException.class,
FateOperation.TABLE_DELETE_RANGE, args, opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
}
  /**
   * Returns all current split points of the table, in sorted order.
   */
  @Override
  public Collection<Text> listSplits(String tableName)
      throws TableNotFoundException, AccumuloSecurityException {
    // tableName is validated in _listSplits
    return _listSplits(tableName);
  }
  /**
   * Reads the table's tablet metadata and returns the end rows of all tablets (i.e. the split
   * points; the last tablet's null end row is filtered out). If a concurrent merge deletes
   * tablets mid-scan, the read is retried after confirming the table still exists.
   */
  private List<Text> _listSplits(String tableName)
      throws TableNotFoundException, AccumuloSecurityException {

    TableId tableId = context.getTableId(tableName);

    while (true) {
      try {
        return context.getAmple().readTablets().forTable(tableId).fetch(PREV_ROW).checkConsistency()
            .build().stream().map(tm -> tm.getExtent().endRow()).filter(Objects::nonNull)
            .collect(Collectors.toList());
      } catch (TabletDeletedException tde) {
        // see if the table was deleted
        context.requireTableExists(tableId, tableName);
        log.debug("A merge happened while trying to list splits for {} {}, retrying ", tableName,
            tableId, tde);
        sleepUninterruptibly(3, SECONDS);
      }
    }
  }
/**
* This version of listSplits is called when the maxSplits options is provided. If the value of
* maxSplits is greater than the number of existing splits, then all splits are returned and no
* additional processing is performed.
*
* But, if the value of maxSplits is less than the number of existing splits, maxSplit split
* values are returned. These split values are "evenly" selected from the existing splits based
* upon the algorithm implemented in the method.
*
* A stepSize is calculated based upon the number of splits requested and the total split count. A
* running sum adjusted by this stepSize is calculated as each split is parsed. Once this sum
* exceeds a value of 1, the current split point is selected to be returned. The sum is then
* decremented by 1 and the process continues until all existing splits have been parsed or
* maxSplits splits have been selected.
*
* @param tableName the name of the table
* @param maxSplits specifies the maximum number of splits to return
* @return a Collection containing a subset of evenly selected splits
*/
@Override
public Collection<Text> listSplits(final String tableName, final int maxSplits)
throws TableNotFoundException, AccumuloSecurityException {
// tableName is validated in _listSplits
final List<Text> existingSplits = _listSplits(tableName);
// As long as maxSplits is equal to or larger than the number of current splits, the existing
// splits are returned and no additional processing is necessary.
if (existingSplits.size() <= maxSplits) {
return existingSplits;
}
// When the number of maxSplits requested is less than the number of existing splits, the
// following code populates the splitsSubset list 'evenly' from the existing splits
ArrayList<Text> splitsSubset = new ArrayList<>(maxSplits);
final int SELECTION_THRESHOLD = 1;
// stepSize can never be greater than 1 due to the if-loop check above.
final double stepSize = (maxSplits + 1) / (double) existingSplits.size();
double selectionTrigger = 0.0;
for (Text existingSplit : existingSplits) {
if (splitsSubset.size() >= maxSplits) {
break;
}
selectionTrigger += stepSize;
if (selectionTrigger > SELECTION_THRESHOLD) {
splitsSubset.add(existingSplit);
selectionTrigger -= 1;
}
}
return splitsSubset;
}
@Override
public void delete(String tableName)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)));
Map<String,String> opts = new HashMap<>();
try {
doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_DELETE,
args, opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
}
@Override
public void clone(String srcTableName, String newTableName, boolean flush,
Map<String,String> propertiesToSet, Set<String> propertiesToExclude)
throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
TableExistsException {
clone(srcTableName, newTableName,
CloneConfiguration.builder().setFlush(flush).setPropertiesToSet(propertiesToSet)
.setPropertiesToExclude(propertiesToExclude).setKeepOffline(false).build());
}
  /**
   * Clones the source table to a new table via the TABLE_CLONE FATE operation. Optionally flushes
   * the source table first so the clone captures in-memory data.
   */
  @Override
  public void clone(String srcTableName, String newTableName, CloneConfiguration config)
      throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
      TableExistsException {
    NEW_TABLE_NAME.validate(newTableName);
    requireNonNull(config, "CloneConfiguration required.");

    TableId srcTableId = context.getTableId(srcTableName);

    if (config.isFlush()) {
      // flush so the clone includes recently written data
      _flush(srcTableId, null, null, true);
    }

    Map<String,String> opts = new HashMap<>();
    validatePropertiesToSet(opts, config.getPropertiesToSet());

    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(srcTableId.canonical().getBytes(UTF_8)),
        ByteBuffer.wrap(newTableName.getBytes(UTF_8)),
        ByteBuffer.wrap(Boolean.toString(config.isKeepOffline()).getBytes(UTF_8)));

    prependPropertiesToExclude(opts, config.getPropertiesToExclude());

    doTableFateOperation(newTableName, AccumuloException.class, FateOperation.TABLE_CLONE, args,
        opts);
  }
@Override
public void rename(String oldTableName, String newTableName) throws AccumuloSecurityException,
TableNotFoundException, AccumuloException, TableExistsException {
EXISTING_TABLE_NAME.validate(oldTableName);
NEW_TABLE_NAME.validate(newTableName);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(oldTableName.getBytes(UTF_8)),
ByteBuffer.wrap(newTableName.getBytes(UTF_8)));
Map<String,String> opts = new HashMap<>();
doTableFateOperation(oldTableName, TableNotFoundException.class, FateOperation.TABLE_RENAME,
args, opts);
}
  /**
   * Initiates a non-blocking flush of the entire table. A TableNotFoundException from the
   * underlying call is rewrapped as AccumuloException because this signature does not declare it.
   */
  @Override
  public void flush(String tableName) throws AccumuloException, AccumuloSecurityException {
    // tableName is validated in the flush method being called below
    try {
      flush(tableName, null, null, false);
    } catch (TableNotFoundException e) {
      throw new AccumuloException(e.getMessage(), e);
    }
  }
  /**
   * Flushes the given row range of the table, optionally waiting for the flush to complete.
   */
  @Override
  public void flush(String tableName, Text start, Text end, boolean wait)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    _flush(context.getTableId(tableName), start, end, wait);
  }
  /**
   * Compacts the given row range with no compaction-time iterators. Delegates to the
   * iterator-accepting overload with an empty iterator list.
   */
  @Override
  public void compact(String tableName, Text start, Text end, boolean flush, boolean wait)
      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
    compact(tableName, start, end, new ArrayList<>(), flush, wait);
  }
@Override
public void compact(String tableName, Text start, Text end, List<IteratorSetting> iterators,
boolean flush, boolean wait)
throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
compact(tableName, new CompactionConfig().setStartRow(start).setEndRow(end)
.setIterators(iterators).setFlush(flush).setWait(wait));
}
  /**
   * Compacts the table per the given configuration via the TABLE_COMPACT FATE operation. Before
   * submitting, verifies that a tablet server can load any configured iterator, configurer, and
   * selector classes, so class-loading failures surface to the client rather than inside the
   * compaction. Optionally flushes the configured range first.
   */
  @Override
  public void compact(String tableName, CompactionConfig config)
      throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
    EXISTING_TABLE_NAME.validate(tableName);

    // Ensure compaction iterators exist on a tabletserver
    final String skviName = SortedKeyValueIterator.class.getName();
    for (IteratorSetting setting : config.getIterators()) {
      String iteratorClass = setting.getIteratorClass();
      if (!testClassLoad(tableName, iteratorClass, skviName)) {
        throw new AccumuloException("TabletServer could not load iterator class " + iteratorClass);
      }
    }

    // verify a non-default configurer class is loadable server-side
    if (!UserCompactionUtils.isDefault(config.getConfigurer())) {
      if (!testClassLoad(tableName, config.getConfigurer().getClassName(),
          CompactionConfigurer.class.getName())) {
        throw new AccumuloException(
            "TabletServer could not load " + CompactionConfigurer.class.getSimpleName() + " class "
                + config.getConfigurer().getClassName());
      }
    }

    // verify a non-default selector class is loadable server-side
    if (!UserCompactionUtils.isDefault(config.getSelector())) {
      if (!testClassLoad(tableName, config.getSelector().getClassName(),
          CompactionSelector.class.getName())) {
        throw new AccumuloException(
            "TabletServer could not load " + CompactionSelector.class.getSimpleName() + " class "
                + config.getSelector().getClassName());
      }
    }

    TableId tableId = context.getTableId(tableName);

    Text start = config.getStartRow();
    Text end = config.getEndRow();

    if (config.getFlush()) {
      // flush first so recently written data is included in the compaction
      _flush(tableId, start, end, true);
    }

    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
        ByteBuffer.wrap(UserCompactionUtils.encode(config)));
    Map<String,String> opts = new HashMap<>();

    try {
      doFateOperation(FateOperation.TABLE_COMPACT, args, opts, tableName, config.getWait());
    } catch (TableExistsException | NamespaceExistsException e) {
      // should not happen
      throw new AssertionError(e);
    } catch (NamespaceNotFoundException e) {
      throw new TableNotFoundException(null, tableName, "Namespace not found", e);
    }
  }
@Override
public void cancelCompaction(String tableName)
throws AccumuloSecurityException, TableNotFoundException, AccumuloException {
EXISTING_TABLE_NAME.validate(tableName);
TableId tableId = context.getTableId(tableName);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)));
Map<String,String> opts = new HashMap<>();
try {
doTableFateOperation(tableName, TableNotFoundException.class,
FateOperation.TABLE_CANCEL_COMPACT, args, opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
}
private void _flush(TableId tableId, Text start, Text end, boolean wait)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
try {
long flushID;
// used to pass the table name. but the tableid associated with a table name could change
// between calls.
// so pass the tableid to both calls
while (true) {
ManagerClientService.Client client = null;
try {
client = ThriftClientTypes.MANAGER.getConnectionWithRetry(context);
flushID =
client.initiateFlush(TraceUtil.traceInfo(), context.rpcCreds(), tableId.canonical());
break;
} catch (TTransportException tte) {
log.debug("Failed to call initiateFlush, retrying ... ", tte);
sleepUninterruptibly(100, MILLISECONDS);
} catch (ThriftNotActiveServiceException e) {
// Let it loop, fetching a new location
log.debug("Contacted a Manager which is no longer active, retrying");
sleepUninterruptibly(100, MILLISECONDS);
} finally {
ThriftUtil.close(client, context);
}
}
while (true) {
ManagerClientService.Client client = null;
try {
client = ThriftClientTypes.MANAGER.getConnectionWithRetry(context);
client.waitForFlush(TraceUtil.traceInfo(), context.rpcCreds(), tableId.canonical(),
TextUtil.getByteBuffer(start), TextUtil.getByteBuffer(end), flushID,
wait ? Long.MAX_VALUE : 1);
break;
} catch (TTransportException tte) {
log.debug("Failed to call initiateFlush, retrying ... ", tte);
sleepUninterruptibly(100, MILLISECONDS);
} catch (ThriftNotActiveServiceException e) {
// Let it loop, fetching a new location
log.debug("Contacted a Manager which is no longer active, retrying");
sleepUninterruptibly(100, MILLISECONDS);
} finally {
ThriftUtil.close(client, context);
}
}
} catch (ThriftSecurityException e) {
switch (e.getCode()) {
case TABLE_DOESNT_EXIST:
throw new TableNotFoundException(tableId.canonical(), null, e.getMessage(), e);
default:
log.debug("flush security exception on table id {}", tableId);
throw new AccumuloSecurityException(e.user, e.code, e);
}
} catch (ThriftTableOperationException e) {
switch (e.getType()) {
case NOTFOUND:
throw new TableNotFoundException(e);
default:
throw new AccumuloException(e.description, e);
}
} catch (Exception e) {
throw new AccumuloException(e);
}
}
@Override
public void setProperty(final String tableName, final String property, final String value)
throws AccumuloException, AccumuloSecurityException {
EXISTING_TABLE_NAME.validate(tableName);
checkArgument(property != null, "property is null");
checkArgument(value != null, "value is null");
try {
setPropertyNoChecks(tableName, property, value);
checkLocalityGroups(tableName, property);
} catch (TableNotFoundException e) {
throw new AccumuloException(e);
}
}
  /**
   * Performs one optimistic-concurrency attempt at modifying the table's properties: fetches the
   * versioned properties, applies the caller's mutation, and sends the result back. The server
   * rejects the update with ConcurrentModificationException if the version changed in between;
   * {@code modifyProperties} retries in that case.
   *
   * @return the property map that was sent to the server
   */
  private Map<String,String> tryToModifyProperties(String tableName,
      final Consumer<Map<String,String>> mapMutator) throws AccumuloException,
      AccumuloSecurityException, IllegalArgumentException, ConcurrentModificationException {

    final TVersionedProperties vProperties =
        ThriftClientTypes.CLIENT.execute(context, client -> client
            .getVersionedTableProperties(TraceUtil.traceInfo(), context.rpcCreds(), tableName));
    mapMutator.accept(vProperties.getProperties());

    // A reference to the map was passed to the user, maybe they still have the reference and are
    // modifying it. Buggy Accumulo code could attempt to make modifications to the map after this
    // point. Because of these potential issues, create an immutable snapshot of the map so that
    // from here on the code is assured to always be dealing with the same map.
    vProperties.setProperties(Map.copyOf(vProperties.getProperties()));

    try {
      // Send to server
      ThriftClientTypes.MANAGER.executeVoid(context,
          client -> client.modifyTableProperties(TraceUtil.traceInfo(), context.rpcCreds(),
              tableName, vProperties));
      // re-validate locality groups for every property that may have changed
      for (String property : vProperties.getProperties().keySet()) {
        checkLocalityGroups(tableName, property);
      }
    } catch (TableNotFoundException e) {
      throw new AccumuloException(e);
    }

    return vProperties.getProperties();
  }
@Override
public Map<String,String> modifyProperties(String tableName,
final Consumer<Map<String,String>> mapMutator)
throws AccumuloException, AccumuloSecurityException, IllegalArgumentException {
EXISTING_TABLE_NAME.validate(tableName);
checkArgument(mapMutator != null, "mapMutator is null");
Retry retry =
Retry.builder().infiniteRetries().retryAfter(25, MILLISECONDS).incrementBy(25, MILLISECONDS)
.maxWait(30, SECONDS).backOffFactor(1.5).logInterval(3, MINUTES).createRetry();
while (true) {
try {
var props = tryToModifyProperties(tableName, mapMutator);
retry.logCompletion(log, "Modifying properties for table " + tableName);
return props;
} catch (ConcurrentModificationException cme) {
try {
retry.logRetry(log, "Unable to modify table properties for " + tableName
+ " because of concurrent modification");
retry.waitForNextAttempt(log, "modify table properties for " + tableName);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} finally {
retry.useRetry();
}
}
}
  /**
   * Sets a table property on the manager without argument validation or locality group checks;
   * callers are responsible for both.
   */
  private void setPropertyNoChecks(final String tableName, final String property,
      final String value)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    ThriftClientTypes.MANAGER.executeVoid(context, client -> client
        .setTableProperty(TraceUtil.traceInfo(), context.rpcCreds(), tableName, property, value));
  }
@Override
public void removeProperty(final String tableName, final String property)
throws AccumuloException, AccumuloSecurityException {
EXISTING_TABLE_NAME.validate(tableName);
checkArgument(property != null, "property is null");
try {
removePropertyNoChecks(tableName, property);
checkLocalityGroups(tableName, property);
} catch (TableNotFoundException e) {
throw new AccumuloException(e);
}
}
  /**
   * Removes a table property on the manager without argument validation or locality group checks;
   * callers are responsible for both.
   */
  private void removePropertyNoChecks(final String tableName, final String property)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    ThriftClientTypes.MANAGER.executeVoid(context, client -> client
        .removeTableProperty(TraceUtil.traceInfo(), context.rpcCreds(), tableName, property));
  }
void checkLocalityGroups(String tableName, String propChanged)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
if (LocalityGroupUtil.isLocalityGroupProperty(propChanged)) {
Map<String,String> allProps = getConfiguration(tableName);
try {
LocalityGroupUtil.checkLocalityGroups(allProps);
} catch (LocalityGroupConfigurationError | RuntimeException e) {
LoggerFactory.getLogger(this.getClass()).warn("Changing '" + propChanged + "' for table '"
+ tableName
+ "' resulted in bad locality group config. This may be a transient situation since "
+ "the config spreads over multiple properties. Setting properties in a different "
+ "order may help. Even though this warning was displayed, the property was updated. "
+ "Please check your config to ensure consistency.", e);
}
}
}
  /**
   * Returns the table's effective configuration (table, namespace, and system defaults merged).
   * ThriftTableOperationException causes indicating a missing table or namespace are unwrapped
   * into TableNotFoundException; any other failure is rethrown as AccumuloException.
   */
  @Override
  public Map<String,String> getConfiguration(final String tableName)
      throws AccumuloException, TableNotFoundException {
    EXISTING_TABLE_NAME.validate(tableName);

    try {
      return ThriftClientTypes.CLIENT.execute(context, client -> client
          .getTableConfiguration(TraceUtil.traceInfo(), context.rpcCreds(), tableName));
    } catch (AccumuloException e) {
      // look for a "not found" thrift cause to surface as TableNotFoundException
      Throwable t = e.getCause();
      if (t instanceof ThriftTableOperationException) {
        ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
        switch (ttoe.getType()) {
          case NOTFOUND:
            throw new TableNotFoundException(ttoe);
          case NAMESPACE_NOTFOUND:
            throw new TableNotFoundException(tableName, new NamespaceNotFoundException(ttoe));
          default:
            throw e;
        }
      }
      throw e;
    } catch (Exception e) {
      throw new AccumuloException(e);
    }
  }
@Override
public Map<String,String> getTableProperties(final String tableName)
throws AccumuloException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
try {
return ThriftClientTypes.CLIENT.execute(context, client -> client
.getTableProperties(TraceUtil.traceInfo(), context.rpcCreds(), tableName));
} catch (AccumuloException e) {
Throwable t = e.getCause();
if (t instanceof ThriftTableOperationException) {
ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
switch (ttoe.getType()) {
case NOTFOUND:
throw new TableNotFoundException(ttoe);
case NAMESPACE_NOTFOUND:
throw new TableNotFoundException(tableName, new NamespaceNotFoundException(ttoe));
default:
throw e;
}
}
throw e;
} catch (Exception e) {
throw new AccumuloException(e);
}
}
  /**
   * Replaces the table's locality group configuration: sets one property per group listing its
   * column families, sets the group-name list property, then removes any group properties that
   * are no longer in the requested configuration.
   */
  @Override
  public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // ensure locality groups do not overlap
    LocalityGroupUtil.ensureNonOverlappingGroups(groups);

    // set the per-group column family properties first, then the group list, so the config is
    // complete by the time the group list names them
    for (Entry<String,Set<Text>> entry : groups.entrySet()) {
      Set<Text> colFams = entry.getValue();
      String value = LocalityGroupUtil.encodeColumnFamilies(colFams);
      setPropertyNoChecks(tableName, Property.TABLE_LOCALITY_GROUP_PREFIX + entry.getKey(), value);
    }

    try {
      setPropertyNoChecks(tableName, Property.TABLE_LOCALITY_GROUPS.getKey(),
          Joiner.on(",").join(groups.keySet()));
    } catch (AccumuloException e) {
      if (e.getCause() instanceof TableNotFoundException) {
        throw (TableNotFoundException) e.getCause();
      }
      throw e;
    }

    // remove anything extraneous
    String prefix = Property.TABLE_LOCALITY_GROUP_PREFIX.getKey();
    for (Entry<String,String> entry : getProperties(tableName)) {
      String property = entry.getKey();
      if (property.startsWith(prefix)) {
        // this property configures a locality group, find out which
        // one:
        String[] parts = property.split("\\.");
        String group = parts[parts.length - 1];

        if (!groups.containsKey(group)) {
          removePropertyNoChecks(tableName, property);
        }
      }
    }
  }
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName)
throws AccumuloException, TableNotFoundException {
AccumuloConfiguration conf = new ConfigurationCopy(this.getProperties(tableName));
Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(conf);
Map<String,Set<Text>> groups2 = new HashMap<>();
for (Entry<String,Set<ByteSequence>> entry : groups.entrySet()) {
HashSet<Text> colFams = new HashSet<>();
for (ByteSequence bs : entry.getValue()) {
colFams.add(new Text(bs.toArray()));
}
groups2.put(entry.getKey(), colFams);
}
return groups2;
}
  /**
   * Splits the given range along tablet boundaries into at most {@code maxSplits} sub-ranges.
   * First bins the range to tablets (retrying until the locator can bin it), then repeatedly
   * merges adjacent tablet extents until at most maxSplits remain, and finally clips each
   * resulting extent's range to the input range.
   */
  @Override
  public Set<Range> splitRangeByTablets(String tableName, Range range, int maxSplits)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    EXISTING_TABLE_NAME.validate(tableName);
    checkArgument(range != null, "range is null");

    if (maxSplits < 1) {
      throw new IllegalArgumentException("maximum splits must be >= 1");
    }
    if (maxSplits == 1) {
      return Collections.singleton(range);
    }

    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
    TableId tableId = context.getTableId(tableName);
    TabletLocator tl = TabletLocator.getLocator(context, tableId);
    // its possible that the cache could contain complete, but old information about a tables
    // tablets... so clear it
    tl.invalidateCache();

    // retry until the whole range can be binned; binRanges returns the ranges it could NOT bin
    while (!tl.binRanges(context, Collections.singletonList(range), binnedRanges).isEmpty()) {
      context.requireNotDeleted(tableId);
      context.requireNotOffline(tableId, tableName);

      log.warn("Unable to locate bins for specified range. Retrying.");
      // sleep randomly between 100 and 200ms
      sleepUninterruptibly(100 + RANDOM.get().nextInt(100), MILLISECONDS);
      binnedRanges.clear();
      tl.invalidateCache();
    }

    // group key extents to get <= maxSplits
    LinkedList<KeyExtent> unmergedExtents = new LinkedList<>();
    List<KeyExtent> mergedExtents = new ArrayList<>();

    for (Map<KeyExtent,List<Range>> map : binnedRanges.values()) {
      unmergedExtents.addAll(map.keySet());
    }

    // the sort method is efficient for linked list
    Collections.sort(unmergedExtents);

    // repeatedly merge adjacent extent pairs; when one list is exhausted, swap the merged
    // results back and continue until the total count fits within maxSplits
    while (unmergedExtents.size() + mergedExtents.size() > maxSplits) {
      if (unmergedExtents.size() >= 2) {
        KeyExtent first = unmergedExtents.removeFirst();
        KeyExtent second = unmergedExtents.removeFirst();
        KeyExtent merged = new KeyExtent(first.tableId(), second.endRow(), first.prevEndRow());
        mergedExtents.add(merged);
      } else {
        mergedExtents.addAll(unmergedExtents);
        unmergedExtents.clear();
        unmergedExtents.addAll(mergedExtents);
        mergedExtents.clear();
      }
    }

    mergedExtents.addAll(unmergedExtents);

    Set<Range> ranges = new HashSet<>();
    for (KeyExtent k : mergedExtents) {
      ranges.add(k.toDataRange().clip(range));
    }

    return ranges;
  }
private Path checkPath(String dir, String kind, String type)
throws IOException, AccumuloException, AccumuloSecurityException {
FileSystem fs = VolumeConfiguration.fileSystemForPath(dir, context.getHadoopConf());
Path ret = dir.contains(":") ? new Path(dir) : fs.makeQualified(new Path(dir));
try {
if (!fs.getFileStatus(ret).isDirectory()) {
throw new AccumuloException(
kind + " import " + type + " directory " + ret + " is not a directory!");
}
} catch (FileNotFoundException fnf) {
throw new AccumuloException(
kind + " import " + type + " directory " + ret + " does not exist!");
}
if (type.equals("failure")) {
FileStatus[] listStatus = fs.listStatus(ret);
if (listStatus != null && listStatus.length != 0) {
throw new AccumuloException("Bulk import failure directory " + ret + " is not empty");
}
}
return ret;
}
  /**
   * Blocks until every tablet of the table matches the expected state: for ONLINE, every tablet
   * has a current (non-FUTURE) location; for OFFLINE, no tablet has a location. Scans tablet
   * metadata in a loop, narrowing the scanned row range to the tablets still pending, and sleeps
   * between passes with a delay scaled by how many tablets are waiting (bounded to 100-5000ms).
   * Metadata "holes" (non-contiguous extents) force a full rescan.
   */
  private void waitForTableStateTransition(TableId tableId, TableState expectedState)
      throws AccumuloException, TableNotFoundException {
    Text startRow = null;
    Text lastRow = null;

    while (true) {
      if (context.getTableState(tableId) != expectedState) {
        // cached state disagrees; refresh the cache and re-check before failing
        context.clearTableListCache();
        TableState currentState = context.getTableState(tableId);
        if (currentState != expectedState) {
          context.requireNotDeleted(tableId);
          if (currentState == TableState.DELETING) {
            throw new TableNotFoundException(tableId.canonical(), "", TABLE_DELETED_MSG);
          }
          throw new AccumuloException(
              "Unexpected table state " + tableId + " " + currentState + " != " + expectedState);
        }
      }

      // restrict the metadata scan to the range of tablets still pending, when known
      Range range;
      if (startRow == null || lastRow == null) {
        range = new KeyExtent(tableId, null, null).toMetaRange();
      } else {
        range = new Range(startRow, lastRow);
      }

      TabletsMetadata tablets = TabletsMetadata.builder(context).scanMetadataTable()
          .overRange(range).fetch(LOCATION, PREV_ROW).build();

      KeyExtent lastExtent = null;

      int total = 0;
      int waitFor = 0;
      int holes = 0;
      Text continueRow = null;
      MapCounter<String> serverCounts = new MapCounter<>();

      for (TabletMetadata tablet : tablets) {
        total++;
        Location loc = tablet.getLocation();

        // a tablet is "not yet transitioned" if it lacks a current location while going ONLINE,
        // or still has any location while going OFFLINE
        if ((expectedState == TableState.ONLINE
            && (loc == null || loc.getType() == LocationType.FUTURE))
            || (expectedState == TableState.OFFLINE && loc != null)) {
          if (continueRow == null) {
            continueRow = tablet.getExtent().toMetaRow();
          }
          waitFor++;
          lastRow = tablet.getExtent().toMetaRow();

          if (loc != null) {
            serverCounts.increment(loc.getHostPortSession(), 1);
          }
        }

        if (!tablet.getExtent().tableId().equals(tableId)) {
          throw new AccumuloException(
              "Saw unexpected table Id " + tableId + " " + tablet.getExtent());
        }

        // detect non-contiguous metadata (e.g. during splits/merges)
        if (lastExtent != null && !tablet.getExtent().isPreviousExtent(lastExtent)) {
          holes++;
        }

        lastExtent = tablet.getExtent();
      }

      if (continueRow != null) {
        startRow = continueRow;
      }

      // holes or an empty scan mean the metadata view was inconsistent; rescan everything
      if (holes > 0 || total == 0) {
        startRow = null;
        lastRow = null;
      }

      if (waitFor > 0 || holes > 0 || total == 0) {
        long waitTime;
        long maxPerServer = 0;
        if (serverCounts.size() > 0) {
          // scale the wait by the most-loaded server's pending tablet count
          maxPerServer = serverCounts.max();
          waitTime = maxPerServer * 10;
        } else {
          waitTime = waitFor * 10L;
        }
        waitTime = Math.max(100, waitTime);
        waitTime = Math.min(5000, waitTime);
        log.trace("Waiting for {}({}) tablets, startRow = {} lastRow = {}, holes={} sleeping:{}ms",
            waitFor, maxPerServer, startRow, lastRow, holes, waitTime);
        sleepUninterruptibly(waitTime, MILLISECONDS);
      } else {
        // all tablets are in the expected state
        break;
      }
    }
  }
  /**
   * Takes the table offline without waiting for the transition to complete.
   */
  @Override
  public void offline(String tableName)
      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    offline(tableName, false);
  }
@Override
public void offline(String tableName, boolean wait)
throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
TableId tableId = context.getTableId(tableName);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)));
Map<String,String> opts = new HashMap<>();
try {
doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_OFFLINE,
args, opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
if (wait) {
waitForTableStateTransition(tableId, TableState.OFFLINE);
}
}
@Override
public boolean isOnline(String tableName) throws AccumuloException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
TableId tableId = context.getTableId(tableName);
TableState expectedState = context.getTableState(tableId, true);
return expectedState == TableState.ONLINE;
}
  @Override
  public void online(String tableName)
      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    // Convenience overload: bring the table online without waiting for the state transition.
    online(tableName, false);
  }
@Override
public void online(String tableName, boolean wait)
throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
TableId tableId = context.getTableId(tableName);
/**
* ACCUMULO-4574 if table is already online return without executing fate operation.
*/
if (isOnline(tableName)) {
if (wait) {
waitForTableStateTransition(tableId, TableState.ONLINE);
}
return;
}
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)));
Map<String,String> opts = new HashMap<>();
try {
doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_ONLINE,
args, opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
if (wait) {
waitForTableStateTransition(tableId, TableState.ONLINE);
}
}
  @Override
  public void clearLocatorCache(String tableName) throws TableNotFoundException {
    // Invalidate all cached tablet locations for this table so subsequent operations
    // re-read tablet locations from the metadata table.
    EXISTING_TABLE_NAME.validate(tableName);
    TabletLocator tabLocator = TabletLocator.getLocator(context, context.getTableId(tableName));
    tabLocator.invalidateCache();
  }
@Override
public Map<String,String> tableIdMap() {
return context.getTableNameToIdMap().entrySet().stream()
.collect(Collectors.toMap(Entry::getKey, e -> e.getValue().canonical(), (v1, v2) -> {
throw new IllegalStateException(
String.format("Duplicate key for values %s and %s", v1, v2));
}, TreeMap::new));
}
  @Override
  public Text getMaxRow(String tableName, Authorizations auths, Text startRow,
      boolean startInclusive, Text endRow, boolean endInclusive) throws TableNotFoundException {
    // Delegates to FindMax using a scanner with the supplied authorizations; only rows
    // visible under those authorizations are considered.
    EXISTING_TABLE_NAME.validate(tableName);
    Scanner scanner = context.createScanner(tableName, auths);
    return FindMax.findMax(scanner, startRow, startInclusive, endRow, endInclusive);
  }
  @Override
  public List<DiskUsage> getDiskUsage(Set<String> tableNames)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Retries until the RPC succeeds; only transport-level failures are retried, every
    // other error is translated into the appropriate API exception and rethrown.
    List<TDiskUsage> diskUsages = null;
    while (diskUsages == null) {
      Pair<String,Client> pair = null;
      try {
        // this operation may use a lot of memory... its likely that connections to tabletservers
        // hosting metadata tablets will be cached, so do not use cached
        // connections
        pair = ThriftClientTypes.CLIENT.getThriftServerConnection(context, false);
        diskUsages = pair.getSecond().getDiskUsage(tableNames, context.rpcCreds());
      } catch (ThriftTableOperationException e) {
        switch (e.getType()) {
          case NOTFOUND:
            throw new TableNotFoundException(e);
          case NAMESPACE_NOTFOUND:
            throw new TableNotFoundException(e.getTableName(), new NamespaceNotFoundException(e));
          default:
            throw new AccumuloException(e.description, e);
        }
      } catch (ThriftSecurityException e) {
        throw new AccumuloSecurityException(e.getUser(), e.getCode());
      } catch (TTransportException e) {
        // some sort of communication error occurred, retry
        if (pair == null) {
          log.debug("Disk usage request failed. Pair is null. Retrying request...", e);
        } else {
          log.debug("Disk usage request failed {}, retrying ... ", pair.getFirst(), e);
        }
        sleepUninterruptibly(100, MILLISECONDS);
      } catch (TException e) {
        // may be a TApplicationException which indicates error on the server side
        throw new AccumuloException(e);
      } finally {
        // must always return thrift connection
        if (pair != null) {
          ThriftUtil.close(pair.getSecond(), context);
        }
      }
    }
    // convert the thrift representation into the public API type
    List<DiskUsage> finalUsages = new ArrayList<>();
    for (TDiskUsage diskUsage : diskUsages) {
      finalUsages.add(new DiskUsage(new TreeSet<>(diskUsage.getTables()), diskUsage.getUsage()));
    }
    return finalUsages;
  }
/**
* Search multiple directories for exportMetadata.zip, the control file used for the importable
* command.
*
* @param context used to obtain filesystem based on configuration
* @param importDirs the set of directories to search.
* @return the Path representing the location of the file.
* @throws AccumuloException if zero or more than one copy of the exportMetadata.zip file are
* found in the directories provided.
*/
public static Path findExportFile(ClientContext context, Set<String> importDirs)
throws AccumuloException {
LinkedHashSet<Path> exportFiles = new LinkedHashSet<>();
for (String importDir : importDirs) {
Path exportFilePath = null;
try {
FileSystem fs = new Path(importDir).getFileSystem(context.getHadoopConf());
exportFilePath = new Path(importDir, Constants.EXPORT_FILE);
log.debug("Looking for export metadata in {}", exportFilePath);
if (fs.exists(exportFilePath)) {
log.debug("Found export metadata in {}", exportFilePath);
exportFiles.add(exportFilePath);
}
} catch (IOException ioe) {
log.warn("Non-Fatal IOException reading export file: {}", exportFilePath, ioe);
}
}
if (exportFiles.size() > 1) {
String fileList = Arrays.toString(exportFiles.toArray());
log.warn("Found multiple export metadata files: " + fileList);
throw new AccumuloException("Found multiple export metadata files: " + fileList);
} else if (exportFiles.isEmpty()) {
log.warn("Unable to locate export metadata");
throw new AccumuloException("Unable to locate export metadata");
}
return exportFiles.iterator().next();
}
public static Map<String,String> getExportedProps(FileSystem fs, Path path) throws IOException {
HashMap<String,String> props = new HashMap<>();
try (ZipInputStream zis = new ZipInputStream(fs.open(path))) {
ZipEntry zipEntry;
while ((zipEntry = zis.getNextEntry()) != null) {
if (zipEntry.getName().equals(Constants.EXPORT_TABLE_CONFIG_FILE)) {
try (BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8))) {
String line;
while ((line = in.readLine()) != null) {
String[] sa = line.split("=", 2);
props.put(sa[0], sa[1]);
}
}
break;
}
}
}
return props;
}
  @Override
  public void importTable(String tableName, Set<String> importDirs, ImportConfiguration ic)
      throws TableExistsException, AccumuloException, AccumuloSecurityException {
    // Creates a new table from a previously exported one. The fate operation receives the
    // table name, the keepOffline/keepMapping flags, and then each validated import dir.
    EXISTING_TABLE_NAME.validate(tableName);
    checkArgument(importDirs != null, "importDir is null");
    boolean keepOffline = ic.isKeepOffline();
    boolean keepMapping = ic.isKeepMappings();
    // normalize and validate each import directory before handing it to the fate op
    Set<String> checkedImportDirs = new HashSet<>();
    try {
      for (String s : importDirs) {
        checkedImportDirs.add(checkPath(s, "Table", "").toString());
      }
    } catch (IOException e) {
      throw new AccumuloException(e);
    }
    // Best effort: warn about exported class properties that reference classes outside the
    // core package, since those must be present on the importing cluster's classpath.
    try {
      Path exportFilePath = findExportFile(context, checkedImportDirs);
      FileSystem fs = exportFilePath.getFileSystem(context.getHadoopConf());
      Map<String,String> props = getExportedProps(fs, exportFilePath);
      for (Entry<String,String> entry : props.entrySet()) {
        if (Property.isClassProperty(entry.getKey())
            && !entry.getValue().contains(Constants.CORE_PACKAGE_NAME)) {
          // sanitize() guards against CRLF injection from data read out of the export file
          LoggerFactory.getLogger(this.getClass()).info(
              "Imported table sets '{}' to '{}'. Ensure this class is on Accumulo classpath.",
              sanitize(entry.getKey()), sanitize(entry.getValue()));
        }
      }
    } catch (IOException ioe) {
      LoggerFactory.getLogger(this.getClass()).warn(
          "Failed to check if imported table references external java classes : {}",
          ioe.getMessage());
    }
    // fate argument order is positional: name, keepOffline, keepMapping, then the dirs
    List<ByteBuffer> args = new ArrayList<>(3 + checkedImportDirs.size());
    args.add(0, ByteBuffer.wrap(tableName.getBytes(UTF_8)));
    args.add(1, ByteBuffer.wrap(Boolean.toString(keepOffline).getBytes(UTF_8)));
    args.add(2, ByteBuffer.wrap(Boolean.toString(keepMapping).getBytes(UTF_8)));
    checkedImportDirs.stream().map(s -> s.getBytes(UTF_8)).map(ByteBuffer::wrap).forEach(args::add);
    try {
      doTableFateOperation(tableName, AccumuloException.class, FateOperation.TABLE_IMPORT, args,
          Collections.emptyMap());
    } catch (TableNotFoundException e) {
      // should not happen
      throw new AssertionError(e);
    }
  }
/**
* Prevent potential CRLF injection into logs from read in user data. See the
* <a href="https://find-sec-bugs.github.io/bugs.htm#CRLF_INJECTION_LOGS">bug description</a>
*/
private String sanitize(String msg) {
return msg.replaceAll("[\r\n]", "");
}
@Override
public void exportTable(String tableName, String exportDir)
throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
EXISTING_TABLE_NAME.validate(tableName);
checkArgument(exportDir != null, "exportDir is null");
if (isOnline(tableName)) {
throw new IllegalStateException("The table " + tableName + " is online; exportTable requires"
+ " a table to be offline before exporting.");
}
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes(UTF_8)),
ByteBuffer.wrap(exportDir.getBytes(UTF_8)));
Map<String,String> opts = Collections.emptyMap();
try {
doTableFateOperation(tableName, TableNotFoundException.class, FateOperation.TABLE_EXPORT,
args, opts);
} catch (TableExistsException e) {
// should not happen
throw new AssertionError(e);
}
}
  @Override
  public boolean testClassLoad(final String tableName, final String className,
      final String asTypeName)
      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
    // Asks a server, via RPC, whether className can be loaded and used as asTypeName for
    // this table; used to fail fast before attaching iterators/constraints.
    EXISTING_TABLE_NAME.validate(tableName);
    checkArgument(className != null, "className is null");
    checkArgument(asTypeName != null, "asTypeName is null");
    try {
      return ThriftClientTypes.CLIENT.execute(context,
          client -> client.checkTableClass(TraceUtil.traceInfo(), context.rpcCreds(), tableName,
              className, asTypeName));
    } catch (AccumuloSecurityException | AccumuloException e) {
      // translate thrift table-operation failures into the API's checked exception types
      Throwable t = e.getCause();
      if (t instanceof ThriftTableOperationException) {
        ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
        switch (ttoe.getType()) {
          case NOTFOUND:
            throw new TableNotFoundException(ttoe);
          case NAMESPACE_NOTFOUND:
            throw new TableNotFoundException(tableName, new NamespaceNotFoundException(ttoe));
          default:
            throw e;
        }
      }
      throw e;
    } catch (Exception e) {
      throw new AccumuloException(e);
    }
  }
  @Override
  public void attachIterator(String tableName, IteratorSetting setting,
      EnumSet<IteratorScope> scopes)
      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    // Verify the iterator class loads server-side before attaching, so a bad class name
    // fails here instead of breaking scans later.
    testClassLoad(tableName, setting.getIteratorClass(), SortedKeyValueIterator.class.getName());
    super.attachIterator(tableName, setting, scopes);
  }
  @Override
  public int addConstraint(String tableName, String constraintClassName)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Verify the constraint class loads server-side before registering it on the table.
    testClassLoad(tableName, constraintClassName, Constraint.class.getName());
    return super.addConstraint(tableName, constraintClassName);
  }
  /**
   * Runs a fate operation for a table and translates a {@link NamespaceNotFoundException} into
   * the exception type the calling operation expects.
   *
   * @param namespaceNotFoundExceptionClass exception type to surface when the namespace is
   *        missing; null when a missing namespace is not possible for this operation
   */
  private void doTableFateOperation(String tableOrNamespaceName,
      Class<? extends Exception> namespaceNotFoundExceptionClass, FateOperation op,
      List<ByteBuffer> args, Map<String,String> opts) throws AccumuloSecurityException,
      AccumuloException, TableExistsException, TableNotFoundException {
    try {
      doFateOperation(op, args, opts, tableOrNamespaceName);
    } catch (NamespaceExistsException e) {
      // should not happen
      throw new AssertionError(e);
    } catch (NamespaceNotFoundException e) {
      if (namespaceNotFoundExceptionClass == null) {
        // should not happen
        throw new AssertionError(e);
      } else if (AccumuloException.class.isAssignableFrom(namespaceNotFoundExceptionClass)) {
        throw new AccumuloException("Cannot create table in non-existent namespace", e);
      } else if (TableNotFoundException.class.isAssignableFrom(namespaceNotFoundExceptionClass)) {
        throw new TableNotFoundException(null, tableOrNamespaceName, "Namespace not found", e);
      } else {
        // should not happen
        throw new AssertionError(e);
      }
    }
  }
private void clearSamplerOptions(String tableName)
throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
EXISTING_TABLE_NAME.validate(tableName);
String prefix = Property.TABLE_SAMPLER_OPTS.getKey();
for (Entry<String,String> entry : getProperties(tableName)) {
String property = entry.getKey();
if (property.startsWith(prefix)) {
removeProperty(tableName, property);
}
}
}
  @Override
  public void setSamplerConfiguration(String tableName, SamplerConfiguration samplerConfiguration)
      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    // Clear options from any previously configured sampler first so stale options
    // do not linger alongside the new configuration.
    EXISTING_TABLE_NAME.validate(tableName);
    clearSamplerOptions(tableName);
    Map<String,String> props =
        new SamplerConfigurationImpl(samplerConfiguration).toTablePropertiesMap();
    modifyProperties(tableName, properties -> properties.putAll(props));
  }
  @Override
  public void clearSamplerConfiguration(String tableName)
      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    // Remove both the sampler class property and all of its options.
    EXISTING_TABLE_NAME.validate(tableName);
    removeProperty(tableName, Property.TABLE_SAMPLER.getKey());
    clearSamplerOptions(tableName);
  }
@Override
public SamplerConfiguration getSamplerConfiguration(String tableName)
throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
EXISTING_TABLE_NAME.validate(tableName);
AccumuloConfiguration conf = new ConfigurationCopy(this.getProperties(tableName));
SamplerConfigurationImpl sci = SamplerConfigurationImpl.newSamplerConfig(conf);
if (sci == null) {
return null;
}
return sci.toSamplerConfiguration();
}
  /**
   * {@link Locations} implementation backed by ranges binned per tablet server. The by-tablet
   * grouping and tablet locations are built eagerly; the by-range grouping is built lazily.
   */
  private static class LocationsImpl implements Locations {
    // lazily built inverse mapping of range -> tablets; see groupByRange()
    private Map<Range,List<TabletId>> groupedByRanges;
    private Map<TabletId,List<Range>> groupedByTablets;
    private Map<TabletId,String> tabletLocations;
    public LocationsImpl(Map<String,Map<KeyExtent,List<Range>>> binnedRanges) {
      groupedByTablets = new HashMap<>();
      groupedByRanges = null;
      tabletLocations = new HashMap<>();
      for (Entry<String,Map<KeyExtent,List<Range>>> entry : binnedRanges.entrySet()) {
        String location = entry.getKey();
        for (Entry<KeyExtent,List<Range>> entry2 : entry.getValue().entrySet()) {
          TabletIdImpl tabletId = new TabletIdImpl(entry2.getKey());
          tabletLocations.put(tabletId, location);
          List<Range> prev =
              groupedByTablets.put(tabletId, Collections.unmodifiableList(entry2.getValue()));
          if (prev != null) {
            // invariant: a tablet may appear under at most one location
            throw new IllegalStateException(
                "Unexpected : tablet at multiple locations : " + location + " " + tabletId);
          }
        }
      }
      groupedByTablets = Collections.unmodifiableMap(groupedByTablets);
    }
    @Override
    public String getTabletLocation(TabletId tabletId) {
      return tabletLocations.get(tabletId);
    }
    @Override
    public Map<Range,List<TabletId>> groupByRange() {
      // NOTE(review): lazy initialization is unsynchronized; appears intended for
      // single-threaded use -- confirm before sharing an instance across threads.
      if (groupedByRanges == null) {
        Map<Range,List<TabletId>> tmp = new HashMap<>();
        groupedByTablets.forEach((tabletId, rangeList) -> rangeList
            .forEach(range -> tmp.computeIfAbsent(range, k -> new ArrayList<>()).add(tabletId)));
        // wrap each list so callers cannot mutate the cached grouping
        Map<Range,List<TabletId>> tmp2 = new HashMap<>();
        for (Entry<Range,List<TabletId>> entry : tmp.entrySet()) {
          tmp2.put(entry.getKey(), Collections.unmodifiableList(entry.getValue()));
        }
        groupedByRanges = Collections.unmodifiableMap(tmp2);
      }
      return groupedByRanges;
    }
    @Override
    public Map<TabletId,List<Range>> groupByTablet() {
      return groupedByTablets;
    }
  }
  @Override
  public Locations locate(String tableName, Collection<Range> ranges)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // Bins the given ranges to their current tablets/locations, retrying (with backoff,
    // indefinitely) while any range cannot yet be binned, e.g. tablets without a location.
    EXISTING_TABLE_NAME.validate(tableName);
    requireNonNull(ranges, "ranges must be non null");
    TableId tableId = context.getTableId(tableName);
    TabletLocator locator = TabletLocator.getLocator(context, tableId);
    // avoid copying when the caller already supplied a List
    List<Range> rangeList = null;
    if (ranges instanceof List) {
      rangeList = (List<Range>) ranges;
    } else {
      rangeList = new ArrayList<>(ranges);
    }
    Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
    locator.invalidateCache();
    Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
        .incrementBy(100, MILLISECONDS).maxWait(2, SECONDS).backOffFactor(1.5)
        .logInterval(3, MINUTES).createRetry();
    // binRanges returns the ranges that could not be binned; loop until none remain
    while (!locator.binRanges(context, rangeList, binnedRanges).isEmpty()) {
      // fail fast if the table was deleted or taken offline while we were waiting
      context.requireTableExists(tableId, tableName);
      context.requireNotOffline(tableId, tableName);
      binnedRanges.clear();
      try {
        retry.waitForNextAttempt(log,
            String.format("locating tablets in table %s(%s) for %d ranges", tableName, tableId,
                rangeList.size()));
      } catch (InterruptedException e) {
        throw new IllegalStateException(e);
      }
      locator.invalidateCache();
    }
    return new LocationsImpl(binnedRanges);
  }
  @Override
  public SummaryRetriever summaries(String tableName) {
    EXISTING_TABLE_NAME.validate(tableName);
    // Fluent builder; nothing is fetched until retrieve() is called.
    return new SummaryRetriever() {
      private Text startRow = null;
      private Text endRow = null;
      private List<TSummarizerConfiguration> summariesToFetch = Collections.emptyList();
      private String summarizerClassRegex;
      private boolean flush = false;
      @Override
      public SummaryRetriever startRow(Text startRow) {
        Objects.requireNonNull(startRow);
        // keep the invariant startRow < endRow regardless of the order setters are called
        if (endRow != null) {
          Preconditions.checkArgument(startRow.compareTo(endRow) < 0,
              "Start row must be less than end row : %s >= %s", startRow, endRow);
        }
        this.startRow = startRow;
        return this;
      }
      @Override
      public SummaryRetriever startRow(CharSequence startRow) {
        return startRow(new Text(startRow.toString()));
      }
      @Override
      public List<Summary> retrieve()
          throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
        TableId tableId = context.getTableId(tableName);
        context.requireNotOffline(tableId, tableName);
        TRowRange range =
            new TRowRange(TextUtil.getByteBuffer(startRow), TextUtil.getByteBuffer(endRow));
        TSummaryRequest request =
            new TSummaryRequest(tableId.canonical(), range, summariesToFetch, summarizerClassRegex);
        // optionally flush first so in-memory data is reflected in the summaries
        if (flush) {
          _flush(tableId, startRow, endRow, true);
        }
        // poll the server session until the summary computation is finished
        TSummaries ret = ThriftClientTypes.TABLET_SERVER.execute(context, client -> {
          TSummaries tsr =
              client.startGetSummaries(TraceUtil.traceInfo(), context.rpcCreds(), request);
          while (!tsr.finished) {
            // NOTE: "contiune" is the (misspelled) generated thrift method name
            tsr = client.contiuneGetSummaries(TraceUtil.traceInfo(), tsr.sessionId);
          }
          return tsr;
        });
        return new SummaryCollection(ret).getSummaries();
      }
      @Override
      public SummaryRetriever endRow(Text endRow) {
        Objects.requireNonNull(endRow);
        if (startRow != null) {
          Preconditions.checkArgument(startRow.compareTo(endRow) < 0,
              "Start row must be less than end row : %s >= %s", startRow, endRow);
        }
        this.endRow = endRow;
        return this;
      }
      @Override
      public SummaryRetriever endRow(CharSequence endRow) {
        return endRow(new Text(endRow.toString()));
      }
      @Override
      public SummaryRetriever withConfiguration(Collection<SummarizerConfiguration> configs) {
        Objects.requireNonNull(configs);
        summariesToFetch = configs.stream().map(SummarizerConfigurationUtil::toThrift)
            .collect(Collectors.toList());
        return this;
      }
      @Override
      public SummaryRetriever withConfiguration(SummarizerConfiguration... config) {
        Objects.requireNonNull(config);
        return withConfiguration(Arrays.asList(config));
      }
      @Override
      public SummaryRetriever withMatchingConfiguration(String regex) {
        Objects.requireNonNull(regex);
        // Do a sanity check here to make sure that regex compiles, instead of having it fail on a
        // tserver.
        Pattern.compile(regex);
        this.summarizerClassRegex = regex;
        return this;
      }
      @Override
      public SummaryRetriever flush(boolean b) {
        this.flush = b;
        return this;
      }
    };
  }
@Override
public void addSummarizers(String tableName, SummarizerConfiguration... newConfigs)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
HashSet<SummarizerConfiguration> currentConfigs =
new HashSet<>(SummarizerConfiguration.fromTableProperties(getProperties(tableName)));
HashSet<SummarizerConfiguration> newConfigSet = new HashSet<>(Arrays.asList(newConfigs));
newConfigSet.removeIf(currentConfigs::contains);
Set<String> newIds =
newConfigSet.stream().map(SummarizerConfiguration::getPropertyId).collect(toSet());
for (SummarizerConfiguration csc : currentConfigs) {
if (newIds.contains(csc.getPropertyId())) {
throw new IllegalArgumentException("Summarizer property id is in use by " + csc);
}
}
Map<String,String> props = SummarizerConfiguration.toTableProperties(newConfigSet);
modifyProperties(tableName, properties -> properties.putAll(props));
}
  @Override
  public void removeSummarizers(String tableName, Predicate<SummarizerConfiguration> predicate)
      throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    // Remove the table properties of every summarizer configuration matching the predicate,
    // all within a single property modification.
    EXISTING_TABLE_NAME.validate(tableName);
    Collection<SummarizerConfiguration> summarizerConfigs =
        SummarizerConfiguration.fromTableProperties(getProperties(tableName));
    modifyProperties(tableName,
        properties -> summarizerConfigs.stream().filter(predicate)
            .map(sc -> sc.toTableProperties().keySet())
            .forEach(keySet -> keySet.forEach(properties::remove)));
  }
  @Override
  public List<SummarizerConfiguration> listSummarizers(String tableName)
      throws AccumuloException, TableNotFoundException {
    // Summarizer configurations are derived from the table's current properties.
    EXISTING_TABLE_NAME.validate(tableName);
    return new ArrayList<>(SummarizerConfiguration.fromTableProperties(getProperties(tableName)));
  }
  @Override
  public ImportDestinationArguments importDirectory(String directory) {
    // Entry point of the fluent bulk-import API; BulkImport carries the builder state.
    return new BulkImport(directory, context);
  }
  @Override
  public TimeType getTimeType(final String tableName) throws TableNotFoundException {
    // Read the TIME column from the first tablet found for the table and report its type.
    TableId tableId = context.getTableId(tableName);
    Optional<TabletMetadata> tabletMetadata = context.getAmple().readTablets().forTable(tableId)
        .fetch(TabletMetadata.ColumnType.TIME).checkConsistency().build().stream().findFirst();
    TabletMetadata timeData =
        tabletMetadata.orElseThrow(() -> new IllegalStateException("Failed to retrieve TimeType"));
    return timeData.getTime().getType();
  }
private void prependPropertiesToExclude(Map<String,String> opts, Set<String> propsToExclude) {
if (propsToExclude == null) {
return;
}
for (String prop : propsToExclude) {
opts.put(PROPERTY_EXCLUDE_PREFIX + prop, "");
}
}
private void validatePropertiesToSet(Map<String,String> opts, Map<String,String> propsToSet) {
if (propsToSet == null) {
return;
}
propsToSet.forEach((k, v) -> {
if (k.startsWith(PROPERTY_EXCLUDE_PREFIX)) {
throw new IllegalArgumentException(
"Property can not start with " + PROPERTY_EXCLUDE_PREFIX);
}
opts.put(k, v);
});
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;
import java.io.IOException;
import java.lang.management.CompilationMXBean;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Durability;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableDeletedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TableOfflineException;
import org.apache.accumulo.core.client.TimedOutException;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletServerMutations;
import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.clientImpl.thrift.TInfo;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.constraints.Violations;
import org.apache.accumulo.core.data.ConstraintViolationSummary;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.dataImpl.thrift.TMutation;
import org.apache.accumulo.core.dataImpl.thrift.UpdateErrors;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.tabletingest.thrift.TabletIngestClientService;
import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.Retry;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.accumulo.core.util.threads.Threads;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.net.HostAndPort;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
/*
* Differences from previous TabletServerBatchWriter
* + As background threads finish sending mutations to tablet servers they decrement memory usage
* + Once the queue of unprocessed mutations reaches 50% it is always pushed
* to the background threads, even if they are currently processing... new
* mutations are merged with mutations currently processing in the background
* + Failed mutations are held for 1000ms and then re-added to the unprocessed queue
* + Flush holds adding of new mutations so it does not wait indefinitely
*
* Considerations
* + All background threads must catch and note Exception
* + mutations for a single tablet server are only processed by one thread
* concurrently (if new mutations come in for a tablet server while one
* thread is processing mutations for it, no other thread should
* start processing those mutations)
*
* Memory accounting
* + when a mutation enters the system memory is incremented
* + when a mutation successfully leaves the system memory is decremented
*/
public class TabletServerBatchWriter implements AutoCloseable {
private static final Logger log = LoggerFactory.getLogger(TabletServerBatchWriter.class);
// basic configuration
private final ClientContext context;
private final long maxMem;
private final long maxLatency;
private final long timeout;
private final Durability durability;
// state
private boolean flushing;
private boolean closed;
private MutationSet mutations;
// background writer
private final MutationWriter writer;
// latency timers
private final ScheduledThreadPoolExecutor executor;
private ScheduledFuture<?> latencyTimerFuture;
private final Map<String,TimeoutTracker> timeoutTrackers =
Collections.synchronizedMap(new HashMap<>());
// stats
private long totalMemUsed = 0;
private long lastProcessingStartTime;
private long totalAdded = 0;
private final AtomicLong totalSent = new AtomicLong(0);
private final AtomicLong totalBinned = new AtomicLong(0);
private final AtomicLong totalBinTime = new AtomicLong(0);
private final AtomicLong totalSendTime = new AtomicLong(0);
private long startTime = 0;
private long initialGCTimes;
private long initialCompileTimes;
private double initialSystemLoad;
private AtomicInteger tabletServersBatchSum = new AtomicInteger(0);
private AtomicInteger tabletBatchSum = new AtomicInteger(0);
private AtomicInteger numBatches = new AtomicInteger(0);
private AtomicInteger maxTabletBatch = new AtomicInteger(Integer.MIN_VALUE);
private AtomicInteger minTabletBatch = new AtomicInteger(Integer.MAX_VALUE);
private AtomicInteger minTabletServersBatch = new AtomicInteger(Integer.MAX_VALUE);
private AtomicInteger maxTabletServersBatch = new AtomicInteger(Integer.MIN_VALUE);
// error handling
private final Violations violations = new Violations();
private final Map<KeyExtent,Set<SecurityErrorCode>> authorizationFailures = new HashMap<>();
private final HashSet<String> serverSideErrors = new HashSet<>();
private final FailedMutations failedMutations;
private int unknownErrors = 0;
private final AtomicBoolean somethingFailed = new AtomicBoolean(false);
private Exception lastUnknownError = null;
  /**
   * Tracks write progress to a single tablet server and throws {@link TimedOutException} once
   * no progress has been made for longer than the configured timeout.
   */
  private static class TimeoutTracker {
    final String server;
    final long timeOut;
    // timestamp of the most recent write attempt or progress
    long activityTime;
    // when the current stall began; null while writes are making progress
    Long firstErrorTime = null;
    TimeoutTracker(String server, long timeOut) {
      this.timeOut = timeOut;
      this.server = server;
    }
    void startingWrite() {
      activityTime = System.currentTimeMillis();
    }
    void madeProgress() {
      // any progress resets the stall window
      activityTime = System.currentTimeMillis();
      firstErrorTime = null;
    }
    void wroteNothing() {
      if (firstErrorTime == null) {
        // first failure: remember when the stall began
        firstErrorTime = activityTime;
      } else if (System.currentTimeMillis() - firstErrorTime > timeOut) {
        throw new TimedOutException(Collections.singleton(server));
      }
    }
    void errorOccured() {
      // an RPC error counts the same as a write that made no progress
      wroteNothing();
    }
    public long getTimeOut() {
      return timeOut;
    }
  }
  /**
   * Creates a batch writer bound to the given client context.
   *
   * @param context client context used for RPCs, configuration, and thread pools
   * @param config user-supplied limits: max memory, max latency, timeout, durability, and the
   *        number of background write threads
   */
  public TabletServerBatchWriter(ClientContext context, BatchWriterConfig config) {
    this.context = context;
    this.executor = context.threadPools()
        .createGeneralScheduledExecutorService(this.context.getConfiguration());
    this.failedMutations = new FailedMutations();
    this.maxMem = config.getMaxMemory();
    // a non-positive configured latency means "no latency bound"
    this.maxLatency = config.getMaxLatency(MILLISECONDS) <= 0 ? Long.MAX_VALUE
        : config.getMaxLatency(MILLISECONDS);
    this.timeout = config.getTimeout(MILLISECONDS);
    this.mutations = new MutationSet();
    this.lastProcessingStartTime = System.currentTimeMillis();
    this.durability = config.getDurability();
    this.writer = new MutationWriter(config.getMaxWriteThreads());
    if (this.maxLatency != Long.MAX_VALUE) {
      // Check 4x per latency window; push buffered mutations to the background writer
      // whenever they have been sitting longer than the latency target.
      latencyTimerFuture = executor
          .scheduleWithFixedDelay(Threads.createNamedRunnable("BatchWriterLatencyTimer", () -> {
            try {
              synchronized (TabletServerBatchWriter.this) {
                if ((System.currentTimeMillis() - lastProcessingStartTime)
                    > TabletServerBatchWriter.this.maxLatency) {
                  startProcessing();
                }
              }
            } catch (Exception e) {
              // surface timer failures to callers via the shared error state
              updateUnknownErrors("Max latency task failed " + e.getMessage(), e);
            }
          }), 0, this.maxLatency / 4, MILLISECONDS);
    }
  }
/**
 * Hands the currently buffered mutations to the background writer and starts a fresh buffer.
 * No-op when nothing is buffered.
 */
private synchronized void startProcessing() {
  if (mutations.getMemoryUsed() == 0) {
    return;
  }
  lastProcessingStartTime = System.currentTimeMillis();
  writer.queueMutations(mutations);
  mutations = new MutationSet();
}
// called by send tasks after mutations are written; wakes threads blocked on the memory
// limit in addMutation
private synchronized void decrementMemUsed(long amount) {
  totalMemUsed -= amount;
  this.notifyAll();
}
/**
 * Queues a single mutation for the given table. May block while the configured memory limit is
 * exceeded or a flush is in progress. A defensive copy of the mutation is made, so the caller
 * is free to reuse the mutation object after this returns.
 *
 * @throws MutationsRejectedException if a previously queued mutation failed
 * @throws IllegalStateException if the writer has been closed
 * @throws IllegalArgumentException if the mutation is empty
 */
public synchronized void addMutation(TableId table, Mutation m)
    throws MutationsRejectedException {

  if (closed) {
    throw new IllegalStateException("Closed");
  }
  if (m.size() == 0) {
    throw new IllegalArgumentException("Can not add empty mutations");
  }

  // fail fast if the latency flush task died; the latency guarantee no longer holds
  if (this.latencyTimerFuture != null) {
    ThreadPools.ensureRunning(this.latencyTimerFuture,
        "Latency timer thread has exited, cannot guarantee latency target");
  }

  checkForFailures();

  // block while over the memory limit or flushing, unless a failure occurred
  waitRTE(() -> (totalMemUsed > maxMem || flushing) && !somethingFailed.get());

  // do checks again since things could have changed while waiting and not holding lock
  if (closed) {
    throw new IllegalStateException("Closed");
  }
  checkForFailures();

  if (startTime == 0) {
    // first mutation; capture JVM baselines used for trace-level statistics at close
    startTime = System.currentTimeMillis();

    List<GarbageCollectorMXBean> gcmBeans = ManagementFactory.getGarbageCollectorMXBeans();
    for (GarbageCollectorMXBean garbageCollectorMXBean : gcmBeans) {
      initialGCTimes += garbageCollectorMXBean.getCollectionTime();
    }

    CompilationMXBean compMxBean = ManagementFactory.getCompilationMXBean();
    if (compMxBean.isCompilationTimeMonitoringSupported()) {
      initialCompileTimes = compMxBean.getTotalCompilationTime();
    }

    initialSystemLoad = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();
  }

  // create a copy of mutation so that after this method returns the user
  // is free to reuse the mutation object, like calling readFields... this
  // is important for the case where a mutation is passed from map to reduce
  // to batch writer... the map reduce code will keep passing the same mutation
  // object into the reduce method
  m = new Mutation(m);

  totalMemUsed += m.estimatedMemoryUsed();
  mutations.addMutation(table, m);
  totalAdded++;

  // once the buffer reaches half the memory limit, hand it to the background writer
  if (mutations.getMemoryUsed() >= maxMem / 2) {
    startProcessing();
    checkForFailures();
  }
}
/**
 * Adds every mutation supplied by the iterator to the batch for the given table.
 *
 * @throws MutationsRejectedException if any previously queued mutation failed
 */
public void addMutation(TableId table, Iterator<Mutation> iterator)
    throws MutationsRejectedException {
  for (Iterator<Mutation> remaining = iterator; remaining.hasNext();) {
    addMutation(table, remaining.next());
  }
}
/**
 * Blocks until every queued mutation has been written or a failure occurs. If another thread
 * is already flushing, this call waits for that flush to finish instead of starting its own.
 *
 * @throws MutationsRejectedException if any mutation failed
 * @throws IllegalStateException if the writer has been closed
 */
public synchronized void flush() throws MutationsRejectedException {

  if (closed) {
    throw new IllegalStateException("Closed");
  }

  Span span = TraceUtil.startSpan(this.getClass(), "flush");
  try (Scope scope = span.makeCurrent()) {
    checkForFailures();

    if (flushing) {
      // some other thread is currently flushing, so wait
      waitRTE(() -> flushing && !somethingFailed.get());

      checkForFailures();
      return;
    }

    flushing = true;

    startProcessing();
    checkForFailures();

    // wait until all outstanding memory is released by successful sends
    waitRTE(() -> totalMemUsed > 0 && !somethingFailed.get());

    flushing = false;
    this.notifyAll();

    checkForFailures();
  } catch (Exception e) {
    TraceUtil.setException(span, e, true);
    throw e;
  } finally {
    span.end();
  }
}
/**
 * Flushes remaining mutations, waits for them to be written, logs trace statistics, and shuts
 * down the background thread pools. Idempotent: subsequent calls return immediately.
 *
 * @throws MutationsRejectedException if any mutation failed
 */
@Override
public synchronized void close() throws MutationsRejectedException {

  if (closed) {
    return;
  }

  Span span = TraceUtil.startSpan(this.getClass(), "close");
  try (Scope scope = span.makeCurrent()) {
    closed = true;

    startProcessing();

    // wait for all queued mutations to be sent (memory is released on success)
    waitRTE(() -> totalMemUsed > 0 && !somethingFailed.get());

    logStats();

    checkForFailures();
  } catch (Exception e) {
    TraceUtil.setException(span, e, true);
    throw e;
  } finally {
    span.end();
    // make a best effort to release these resources
    writer.binningThreadPool.shutdownNow();
    writer.sendThreadPool.shutdownNow();
    executor.shutdownNow();
  }
}
/**
 * Logs trace-level lifetime statistics for this batch writer: mutation counts, send/bin rates,
 * per-batch tablet server and tablet counts, and JVM GC/compile overhead. No-op unless trace
 * logging is enabled.
 *
 * <p>
 * Fix: the per-batch averages previously performed integer division before casting to float
 * ({@code (float) (sum / count)}), truncating the fractional part even though the value is
 * printed with {@code %,8.2f}. The division is now done in double precision.
 */
private void logStats() {
  if (log.isTraceEnabled()) {
    long finishTime = System.currentTimeMillis();

    long finalGCTimes = 0;
    List<GarbageCollectorMXBean> gcmBeans = ManagementFactory.getGarbageCollectorMXBeans();
    for (GarbageCollectorMXBean garbageCollectorMXBean : gcmBeans) {
      finalGCTimes += garbageCollectorMXBean.getCollectionTime();
    }

    CompilationMXBean compMxBean = ManagementFactory.getCompilationMXBean();
    long finalCompileTimes = 0;
    if (compMxBean.isCompilationTimeMonitoringSupported()) {
      finalCompileTimes = compMxBean.getTotalCompilationTime();
    }

    double averageRate = totalSent.get() / (totalSendTime.get() / 1000.0);
    double overallRate = totalAdded / ((finishTime - startTime) / 1000.0);

    double finalSystemLoad = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();

    log.trace("");
    log.trace("TABLET SERVER BATCH WRITER STATISTICS");
    log.trace(String.format("Added : %,10d mutations", totalAdded));
    log.trace(String.format("Sent : %,10d mutations", totalSent.get()));
    log.trace(String.format("Resent percentage : %10.2f%s",
        (totalSent.get() - totalAdded) / (double) totalAdded * 100.0, "%"));
    log.trace(
        String.format("Overall time : %,10.2f secs", (finishTime - startTime) / 1000.0));
    log.trace(String.format("Overall send rate : %,10.2f mutations/sec", overallRate));
    log.trace(
        String.format("Send efficiency : %10.2f%s", overallRate / averageRate * 100.0, "%"));
    log.trace("");
    log.trace("BACKGROUND WRITER PROCESS STATISTICS");
    log.trace(
        String.format("Total send time : %,10.2f secs %6.2f%s", totalSendTime.get() / 1000.0,
            100.0 * totalSendTime.get() / (finishTime - startTime), "%"));
    log.trace(String.format("Average send rate : %,10.2f mutations/sec", averageRate));
    log.trace(String.format("Total bin time : %,10.2f secs %6.2f%s",
        totalBinTime.get() / 1000.0, 100.0 * totalBinTime.get() / (finishTime - startTime), "%"));
    log.trace(String.format("Average bin rate : %,10.2f mutations/sec",
        totalBinned.get() / (totalBinTime.get() / 1000.0)));
    // divide in double so the average keeps its fractional part
    log.trace(String.format("tservers per batch : %,8.2f avg %,6d min %,6d max",
        (numBatches.get() != 0 ? tabletServersBatchSum.get() / (double) numBatches.get() : 0.0),
        minTabletServersBatch.get(), maxTabletServersBatch.get()));
    log.trace(String.format("tablets per batch : %,8.2f avg %,6d min %,6d max",
        (numBatches.get() != 0 ? tabletBatchSum.get() / (double) numBatches.get() : 0.0),
        minTabletBatch.get(), maxTabletBatch.get()));
    log.trace("");
    log.trace("SYSTEM STATISTICS");
    log.trace(String.format("JVM GC Time : %,10.2f secs",
        ((finalGCTimes - initialGCTimes) / 1000.0)));
    if (compMxBean.isCompilationTimeMonitoringSupported()) {
      log.trace(String.format("JVM Compile Time : %,10.2f secs",
          (finalCompileTimes - initialCompileTimes) / 1000.0));
    }
    log.trace(String.format("System load average : initial=%6.2f final=%6.2f", initialSystemLoad,
        finalSystemLoad));
  }
}
/**
 * Accumulates statistics for a completed send: the number of mutations sent and the time spent
 * sending them, in milliseconds. Both counters are atomic, so no locking is needed.
 */
private void updateSendStats(long count, long time) {
  totalSendTime.addAndGet(time);
  totalSent.addAndGet(count);
}
/**
 * Accumulates binning statistics (mutations binned, time spent, batch shape). Only collected
 * when trace logging is enabled, since logStats is the only consumer.
 */
public void updateBinningStats(int count, long time,
    Map<String,TabletServerMutations<Mutation>> binnedMutations) {
  if (log.isTraceEnabled()) {
    totalBinTime.addAndGet(time);
    totalBinned.addAndGet(count);
    updateBatchStats(binnedMutations);
  }
}
/**
 * Atomically lowers {@code stat} to {@code update} when the update is smaller, leaving the
 * value unchanged otherwise.
 */
private static void computeMin(AtomicInteger stat, int update) {
  // accumulateAndGet performs the compare-and-set retry loop internally, replacing the
  // hand-rolled CAS loop
  stat.accumulateAndGet(update, Math::min);
}
/**
 * Atomically raises {@code stat} to {@code update} when the update is larger, leaving the
 * value unchanged otherwise.
 */
private static void computeMax(AtomicInteger stat, int update) {
  // accumulateAndGet performs the compare-and-set retry loop internally, replacing the
  // hand-rolled CAS loop
  stat.accumulateAndGet(update, Math::max);
}
/**
 * Records the shape of one binned batch: how many tablet servers and tablets it spans, plus
 * running sum/min/max used for trace-level reporting in logStats.
 */
private void updateBatchStats(Map<String,TabletServerMutations<Mutation>> binnedMutations) {
  int numServers = binnedMutations.size();
  tabletServersBatchSum.addAndGet(numServers);
  computeMin(minTabletServersBatch, numServers);
  computeMax(maxTabletServersBatch, numServers);

  int numTablets = 0;
  for (TabletServerMutations<Mutation> tsm : binnedMutations.values()) {
    numTablets += tsm.getMutations().size();
  }
  tabletBatchSum.addAndGet(numTablets);
  computeMin(minTabletBatch, numTablets);
  computeMax(maxTabletBatch, numTablets);

  numBatches.incrementAndGet();
}
/**
 * Condition evaluated by waitRTE while holding this writer's monitor; waiting continues for as
 * long as it returns true.
 */
@FunctionalInterface
private interface WaitCondition {
  boolean shouldWait();
}
/**
 * Waits on this writer's monitor until the condition reports that waiting should stop.
 * Callers must hold this object's monitor.
 *
 * <p>
 * Fix: the previous version swallowed the thread's interrupt status when converting
 * InterruptedException to an unchecked exception; the status is now restored per standard
 * interruption-handling practice.
 *
 * @throws IllegalStateException if the wait is interrupted (interrupt status is preserved)
 */
private void waitRTE(WaitCondition condition) {
  try {
    while (condition.shouldWait()) {
      wait();
    }
  } catch (InterruptedException e) {
    // preserve the interrupt status for code further up the stack
    Thread.currentThread().interrupt();
    throw new IllegalStateException(e);
  }
}
// BEGIN code for handling unrecoverable errors
/**
 * Records constraint violations reported by a tablet server. A non-empty list marks the writer
 * as failed and wakes any threads blocked in waitRTE.
 */
private void updatedConstraintViolations(List<ConstraintViolationSummary> cvsList) {
  if (!cvsList.isEmpty()) {
    synchronized (this) {
      somethingFailed.set(true);
      violations.add(cvsList);
      this.notifyAll();
    }
  }
}
/**
 * Merges authorization failures reported by a tablet server into this writer's accumulated
 * failures, marks the writer failed, and wakes blocked threads. Also verifies the affected
 * tables still exist, since an authorization failure can be a symptom of a deleted table.
 */
private void updateAuthorizationFailures(Map<KeyExtent,SecurityErrorCode> authorizationFailures) {
  if (!authorizationFailures.isEmpty()) {
    // was a table deleted?
    context.clearTableListCache();
    authorizationFailures.keySet().stream().map(KeyExtent::tableId)
        .forEach(context::requireNotDeleted);

    synchronized (this) {
      somethingFailed.set(true);
      // add these authorizationFailures to those collected by this batch writer
      authorizationFailures.forEach((ke, code) -> this.authorizationFailures
          .computeIfAbsent(ke, k -> new HashSet<>()).add(code));
      this.notifyAll();
    }
  }
}
/**
 * Records an unexpected server-side error for the given server, marks the writer failed, and
 * wakes any threads blocked in waitRTE.
 */
private synchronized void updateServerErrors(String server, Exception e) {
  somethingFailed.set(true);
  this.serverSideErrors.add(server);
  this.notifyAll();
  log.error("Server side error on {}", server, e);
}
/**
 * Records an unclassified error, marks the writer failed, and wakes blocked threads.
 * Table-deleted, table-offline, and timed-out exceptions are logged at debug level since they
 * are expected failure modes rather than true unknowns.
 */
private synchronized void updateUnknownErrors(String msg, Exception t) {
  somethingFailed.set(true);
  unknownErrors++;
  this.lastUnknownError = t;
  this.notifyAll();
  if (t instanceof TableDeletedException || t instanceof TableOfflineException
      || t instanceof TimedOutException) {
    log.debug("{}", msg, t); // this is not unknown
  } else {
    log.error("{}", msg, t);
  }
}
/**
 * Throws a MutationsRejectedException summarizing all accumulated failures (constraint
 * violations, authorization failures, server errors, unknown errors) if any failure has been
 * recorded; otherwise does nothing.
 *
 * @throws MutationsRejectedException when any failure has occurred since the writer started
 */
private void checkForFailures() throws MutationsRejectedException {
  if (!somethingFailed.get()) {
    return;
  }

  List<ConstraintViolationSummary> cvsList = violations.asList();
  HashMap<TabletId,Set<org.apache.accumulo.core.client.security.SecurityErrorCode>> af =
      new HashMap<>();
  // translate thrift security error codes into the public client enum
  authorizationFailures.forEach((extent, codes) -> {
    HashSet<org.apache.accumulo.core.client.security.SecurityErrorCode> converted =
        new HashSet<>();
    for (SecurityErrorCode sce : codes) {
      converted.add(org.apache.accumulo.core.client.security.SecurityErrorCode.valueOf(sce.name()));
    }
    af.put(new TabletIdImpl(extent), converted);
  });

  throw new MutationsRejectedException(context, cvsList, af, serverSideErrors, unknownErrors,
      lastUnknownError);
}
// END code for handling unrecoverable errors
// BEGIN code for handling failed mutations
/**
 * Adds mutations that previously failed back into the mix so they will be sent again.
 * Processing starts immediately when memory pressure is high or a flush/close is waiting on
 * the mutations to drain.
 */
private synchronized void addFailedMutations(MutationSet failedMutations) {
  mutations.addAll(failedMutations);
  if (mutations.getMemoryUsed() >= maxMem / 2 || closed || flushing) {
    startProcessing();
  }
}
/**
 * Buffers mutations that failed to be written so they can be re-queued for another attempt. A
 * background task, scheduled every 500ms, re-queues a batch of failures once it has aged for
 * over a second, giving the cluster a moment to recover before the retry.
 */
private class FailedMutations {

  // failures accumulated since the last requeue; null when empty
  private MutationSet recentFailures = null;
  // creation time of the current recentFailures set, used to age failures before requeue
  private long initTime;
  private final Runnable task;
  private final ScheduledFuture<?> future;

  FailedMutations() {
    task =
        Threads.createNamedRunnable("failed mutationBatchWriterLatencyTimers handler", this::run);
    future = executor.scheduleWithFixedDelay(task, 0, 500, MILLISECONDS);
  }

  // lazily creates the failure set and verifies the background requeue task is still alive
  private MutationSet init() {
    ThreadPools.ensureRunning(future,
        "Background task that re-queues failed mutations has exited.");
    if (recentFailures == null) {
      recentFailures = new MutationSet();
      initTime = System.currentTimeMillis();
    }
    return recentFailures;
  }

  synchronized void add(TableId table, ArrayList<Mutation> tableFailures) {
    init().addAll(table, tableFailures);
  }

  synchronized void add(MutationSet failures) {
    init().addAll(failures);
  }

  synchronized void add(TabletServerMutations<Mutation> tsm) {
    init();
    tsm.getMutations().forEach((ke, muts) -> recentFailures.addAll(ke.tableId(), muts));
  }

  // periodic task body: requeues failures that have aged at least one second
  public void run() {
    try {
      MutationSet rf = null;

      synchronized (this) {
        if (recentFailures != null && System.currentTimeMillis() - initTime > 1000) {
          rf = recentFailures;
          recentFailures = null;
        }
      }

      if (rf != null) {
        if (log.isTraceEnabled()) {
          log.trace("tid={} Requeuing {} failed mutations", Thread.currentThread().getId(),
              rf.size());
        }
        addFailedMutations(rf);
      }
    } catch (Exception t) {
      updateUnknownErrors("tid=" + Thread.currentThread().getId()
          + " Failed to requeue failed mutations " + t.getMessage(), t);
      // the writer is now in a failed state, so stop the periodic task
      executor.remove(task);
    }
  }
}
// END code for handling failed mutations
// BEGIN code for sending mutations to tablet servers using background threads
private class MutationWriter {
// approximate upper bound, in bytes, on the mutations sent in one applyUpdates RPC
private static final int MUTATION_BATCH_SIZE = 1 << 17;
private final ThreadPoolExecutor sendThreadPool;
private final ThreadPoolExecutor binningThreadPool;
// mutations binned by destination tablet server, waiting to be picked up by a SendTask
private final Map<String,TabletServerMutations<Mutation>> serversMutations;
// servers that currently have a SendTask queued or running
private final Set<String> queued;
// one shared locator per table, created lazily
private final Map<TableId,TabletLocator> locators;
/**
 * @param numSendThreads number of threads used to send binned mutations to tablet servers
 */
public MutationWriter(int numSendThreads) {
  serversMutations = new HashMap<>();
  queued = new HashSet<>();
  sendThreadPool = context.threadPools().createFixedThreadPool(numSendThreads,
      this.getClass().getName(), false);
  locators = new HashMap<>();
  // single binning thread with a synchronous hand-off; CallerRunsPolicy applies back
  // pressure by making the submitting thread do the binning when the binner is busy
  binningThreadPool = context.threadPools().createFixedThreadPool(1, "BinMutations",
      new SynchronousQueue<>(), false);
  binningThreadPool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
}
/**
 * Returns the tablet locator for the given table, creating a timeout-aware locator on first
 * use. Synchronized so concurrent binning/send threads share a single locator per table.
 */
private synchronized TabletLocator getLocator(TableId tableId) {
  // computeIfAbsent replaces the get / null-check / put sequence with the standard map idiom
  return locators.computeIfAbsent(tableId,
      id -> new TimeoutTabletLocator(timeout, context, id));
}
/**
 * Bins mutations by destination tablet server into binnedMutations. Mutations whose tablet
 * could not be located are handed to failedMutations for later retry. On any error the output
 * map is cleared so partially binned results are never sent.
 */
private void binMutations(MutationSet mutationsToProcess,
    Map<String,TabletServerMutations<Mutation>> binnedMutations) {
  TableId tableId = null;
  try {
    Set<Entry<TableId,List<Mutation>>> es = mutationsToProcess.getMutations().entrySet();
    for (Entry<TableId,List<Mutation>> entry : es) {
      tableId = entry.getKey();
      TabletLocator locator = getLocator(tableId);
      List<Mutation> tableMutations = entry.getValue();

      if (tableMutations != null) {
        ArrayList<Mutation> tableFailures = new ArrayList<>();
        locator.binMutations(context, tableMutations, binnedMutations, tableFailures);

        if (!tableFailures.isEmpty()) {
          failedMutations.add(tableId, tableFailures);

          // if every mutation failed to bin, the table may be deleted or offline
          if (tableFailures.size() == tableMutations.size()) {
            context.requireNotDeleted(tableId);
            context.requireNotOffline(tableId, null);
          }
        }
      }
    }
    return;
  } catch (AccumuloServerException ase) {
    updateServerErrors(ase.getServer(), ase);
  } catch (AccumuloException ae) {
    // assume an IOError communicating with metadata tablet
    failedMutations.add(mutationsToProcess);
  } catch (AccumuloSecurityException e) {
    updateAuthorizationFailures(Collections.singletonMap(new KeyExtent(tableId, null, null),
        SecurityErrorCode.valueOf(e.getSecurityErrorCode().name())));
  } catch (TableDeletedException | TableNotFoundException | TableOfflineException e) {
    updateUnknownErrors(e.getMessage(), e);
  }

  // an error occurred
  binnedMutations.clear();
}
/**
 * Asynchronously bins and dispatches the given mutation set on the binning thread. Any error
 * is routed to updateUnknownErrors, which marks the writer failed.
 */
void queueMutations(final MutationSet mutationsToSend) {
  if (mutationsToSend == null) {
    return;
  }
  binningThreadPool.execute(() -> {
    try {
      log.trace("{} - binning {} mutations", Thread.currentThread().getName(),
          mutationsToSend.size());
      addMutations(mutationsToSend);
    } catch (Exception e) {
      updateUnknownErrors("Error processing mutation set", e);
    }
  });
}
// bins the mutation set (recording timing statistics under a trace span) and then queues the
// binned result for sending
private void addMutations(MutationSet mutationsToSend) {
  Map<String,TabletServerMutations<Mutation>> binnedMutations = new HashMap<>();
  Span span = TraceUtil.startSpan(this.getClass(), "binMutations");
  try (Scope scope = span.makeCurrent()) {
    long t1 = System.currentTimeMillis();
    binMutations(mutationsToSend, binnedMutations);
    long t2 = System.currentTimeMillis();
    updateBinningStats(mutationsToSend.size(), (t2 - t1), binnedMutations);
  } catch (Exception e) {
    TraceUtil.setException(span, e, true);
    throw e;
  } finally {
    span.end();
  }
  addMutations(binnedMutations);
}
/**
 * Merges freshly binned mutations into the per-server pending map and ensures a SendTask is
 * queued for each server with pending data. Servers are processed in random order to spread
 * load across the cluster.
 */
private synchronized void
    addMutations(Map<String,TabletServerMutations<Mutation>> binnedMutations) {

  int count = 0;

  // merge mutations into existing mutations for a tablet server
  for (Entry<String,TabletServerMutations<Mutation>> entry : binnedMutations.entrySet()) {
    String server = entry.getKey();

    TabletServerMutations<Mutation> currentMutations = serversMutations.get(server);

    if (currentMutations == null) {
      serversMutations.put(server, entry.getValue());
    } else {
      for (Entry<KeyExtent,List<Mutation>> entry2 : entry.getValue().getMutations()
          .entrySet()) {
        for (Mutation m : entry2.getValue()) {
          currentMutations.addMutation(entry2.getKey(), m);
        }
      }
    }

    // count is only used for the trace message below
    if (log.isTraceEnabled()) {
      for (Entry<KeyExtent,List<Mutation>> entry2 : entry.getValue().getMutations()
          .entrySet()) {
        count += entry2.getValue().size();
      }
    }

  }

  if (count > 0 && log.isTraceEnabled()) {
    log.trace(String.format("Started sending %,d mutations to %,d tablet servers", count,
        binnedMutations.keySet().size()));
  }

  // randomize order of servers
  ArrayList<String> servers = new ArrayList<>(binnedMutations.keySet());
  Collections.shuffle(servers);

  // only queue a SendTask for servers that do not already have one queued or running
  for (String server : servers) {
    if (!queued.contains(server)) {
      sendThreadPool.execute(new SendTask(server));
      queued.add(server);
    }
  }
}
// atomically takes all pending mutations for a server; when none remain, the server is also
// removed from the queued set so a future batch will schedule a new SendTask
private synchronized TabletServerMutations<Mutation> getMutationsToSend(String server) {
  TabletServerMutations<Mutation> tsmuts = serversMutations.remove(server);
  if (tsmuts == null) {
    queued.remove(server);
  }
  return tsmuts;
}
/**
 * Task that drains all pending mutations for a single tablet server. At most one SendTask is
 * queued per server; it loops until getMutationsToSend returns null, which also removes the
 * server from the queued set.
 */
class SendTask implements Runnable {

  private final String location;

  SendTask(String server) {
    this.location = server;
  }

  @Override
  public void run() {
    try {
      TabletServerMutations<Mutation> tsmuts = getMutationsToSend(location);

      while (tsmuts != null) {
        send(tsmuts);
        tsmuts = getMutationsToSend(location);
      }
    } catch (Exception t) {
      updateUnknownErrors(
          "Failed to send tablet server " + location + " its batch : " + t.getMessage(), t);
    }
  }

  /**
   * Sends one batch of mutations to this task's tablet server, tracking timing statistics and
   * re-queuing any failed mutations. An IOException invalidates the locator caches for the
   * affected tables and re-queues the whole batch for retry.
   */
  public void send(TabletServerMutations<Mutation> tsm)
      throws AccumuloServerException, AccumuloSecurityException {

    MutationSet failures = null;

    String oldName = Thread.currentThread().getName();

    Map<KeyExtent,List<Mutation>> mutationBatch = tsm.getMutations();
    try {

      long count = 0;

      Set<TableId> tableIds = new TreeSet<>();
      for (Map.Entry<KeyExtent,List<Mutation>> entry : mutationBatch.entrySet()) {
        count += entry.getValue().size();
        tableIds.add(entry.getKey().tableId());
      }

      String msg = "sending " + String.format("%,d", count) + " mutations to "
          + String.format("%,d", mutationBatch.size()) + " tablets at " + location + " tids: ["
          + Joiner.on(',').join(tableIds) + ']';

      // thread name doubles as progress reporting, visible in thread dumps
      Thread.currentThread().setName(msg);

      Span span = TraceUtil.startSpan(this.getClass(), "sendMutations");
      try (Scope scope = span.makeCurrent()) {

        // lazily create a timeout tracker per server
        TimeoutTracker timeoutTracker = timeoutTrackers.get(location);
        if (timeoutTracker == null) {
          timeoutTracker = new TimeoutTracker(location, timeout);
          timeoutTrackers.put(location, timeoutTracker);
        }

        long st1 = System.currentTimeMillis();
        try (SessionCloser sessionCloser = new SessionCloser(location)) {
          failures = sendMutationsToTabletServer(location, mutationBatch, timeoutTracker,
              sessionCloser);
        } catch (ThriftSecurityException e) {
          updateAuthorizationFailures(
              mutationBatch.keySet().stream().collect(toMap(identity(), ke -> e.code)));
          throw new AccumuloSecurityException(e.user, e.code, e);
        }
        long st2 = System.currentTimeMillis();
        if (log.isTraceEnabled()) {
          log.trace("sent " + String.format("%,d", count) + " mutations to " + location + " in "
              + String.format("%.2f secs (%,.2f mutations/sec) with %,d failures",
                  (st2 - st1) / 1000.0, count / ((st2 - st1) / 1000.0), failures.size()));
        }

        // release memory only for the mutations that were actually committed
        long successBytes = 0;
        for (Entry<KeyExtent,List<Mutation>> entry : mutationBatch.entrySet()) {
          for (Mutation mutation : entry.getValue()) {
            successBytes += mutation.estimatedMemoryUsed();
          }
        }

        if (failures.size() > 0) {
          failedMutations.add(failures);
          successBytes -= failures.getMemoryUsed();
        }

        updateSendStats(count, st2 - st1);
        decrementMemUsed(successBytes);

      } catch (Exception e) {
        TraceUtil.setException(span, e, true);
        throw e;
      } finally {
        span.end();
      }

    } catch (IOException e) {
      log.debug("failed to send mutations to {}", location, e);

      HashSet<TableId> tables = new HashSet<>();
      for (KeyExtent ke : mutationBatch.keySet()) {
        tables.add(ke.tableId());
      }

      // tablets may have moved; drop cached locations before the retry
      for (TableId table : tables) {
        getLocator(table).invalidateCache(context, location);
      }

      failedMutations.add(tsm);

    } finally {
      Thread.currentThread().setName(oldName);
    }
  }
}
/**
 * Performs the thrift RPCs to write one batch of mutations to a tablet server, returning the
 * mutations that were not committed so the caller can retry them. Updates the timeout tracker
 * depending on whether any progress was made.
 *
 * @throws IOException on transport/communication problems (retryable)
 * @throws AccumuloSecurityException on authentication or authorization failure
 * @throws AccumuloServerException on an unexpected server-side error
 */
private MutationSet sendMutationsToTabletServer(String location,
    Map<KeyExtent,List<Mutation>> tabMuts, TimeoutTracker timeoutTracker,
    SessionCloser sessionCloser)
    throws IOException, AccumuloSecurityException, AccumuloServerException {
  if (tabMuts.isEmpty()) {
    return new MutationSet();
  }
  TInfo tinfo = TraceUtil.traceInfo();

  timeoutTracker.startingWrite();

  // If there is an open session, must close it before the batchwriter closes or writes could
  // happen after the batch writer closes. See #3721
  try {
    final HostAndPort parsedServer = HostAndPort.fromString(location);
    final TabletIngestClientService.Iface client;

    // use the smaller of the tracker timeout and the client's default RPC timeout
    if (timeoutTracker.getTimeOut() < context.getClientTimeoutInMillis()) {
      client = ThriftUtil.getClient(ThriftClientTypes.TABLET_INGEST, parsedServer, context,
          timeoutTracker.getTimeOut());
    } else {
      client = ThriftUtil.getClient(ThriftClientTypes.TABLET_INGEST, parsedServer, context);
    }

    try {
      MutationSet allFailures = new MutationSet();

      // set the session on the sessionCloser so that any failures after this point will close
      // the session if needed
      sessionCloser.setSession(
          client.startUpdate(tinfo, context.rpcCreds(), DurabilityImpl.toThrift(durability)));

      List<TMutation> updates = new ArrayList<>();
      for (Entry<KeyExtent,List<Mutation>> entry : tabMuts.entrySet()) {
        long size = 0;
        Iterator<Mutation> iter = entry.getValue().iterator();
        while (iter.hasNext()) {
          // send the mutations for a tablet in chunks of roughly MUTATION_BATCH_SIZE bytes
          while (size < MUTATION_BATCH_SIZE && iter.hasNext()) {
            Mutation mutation = iter.next();
            updates.add(mutation.toThrift());
            size += mutation.numBytes();
          }

          client.applyUpdates(tinfo, sessionCloser.getSession(), entry.getKey().toThrift(),
              updates);
          updates.clear();
          size = 0;
        }
      }

      UpdateErrors updateErrors = client.closeUpdate(tinfo, sessionCloser.getSession());

      // the write completed successfully so no need to close the session
      sessionCloser.clearSession();

      // @formatter:off
      Map<KeyExtent,Long> failures = updateErrors.failedExtents.entrySet().stream().collect(toMap(
                      entry -> KeyExtent.fromThrift(entry.getKey()),
                      Entry::getValue
      ));
      // @formatter:on
      updatedConstraintViolations(updateErrors.violationSummaries.stream()
          .map(ConstraintViolationSummary::new).collect(toList()));
      // @formatter:off
      updateAuthorizationFailures(updateErrors.authorizationFailures.entrySet().stream().collect(toMap(
                      entry -> KeyExtent.fromThrift(entry.getKey()),
                      Entry::getValue
      )));
      // @formatter:on
      long totalCommitted = 0;

      // for each failed extent, the reported value is how many of its mutations committed;
      // the remainder must be retried
      for (Entry<KeyExtent,Long> entry : failures.entrySet()) {
        KeyExtent failedExtent = entry.getKey();
        int numCommitted = (int) (long) entry.getValue();
        totalCommitted += numCommitted;

        TableId tableId = failedExtent.tableId();

        getLocator(tableId).invalidateCache(failedExtent);

        List<Mutation> mutations = tabMuts.get(failedExtent);
        allFailures.addAll(tableId, mutations.subList(numCommitted, mutations.size()));
      }

      if (failures.keySet().containsAll(tabMuts.keySet()) && totalCommitted == 0) {
        // nothing was successfully written
        timeoutTracker.wroteNothing();
      } else {
        // successfully wrote something to tablet server
        timeoutTracker.madeProgress();
      }

      return allFailures;
    } finally {
      ThriftUtil.returnClient((TServiceClient) client, context);
    }
  } catch (TTransportException e) {
    timeoutTracker.errorOccured();
    throw new IOException(e);
  } catch (TApplicationException tae) {
    // no need to close the session when unretryable errors happen
    sessionCloser.clearSession();
    updateServerErrors(location, tae);
    throw new AccumuloServerException(location, tae);
  } catch (ThriftSecurityException e) {
    // no need to close the session when unretryable errors happen
    sessionCloser.clearSession();
    updateAuthorizationFailures(
        tabMuts.keySet().stream().collect(toMap(identity(), ke -> e.code)));
    throw new AccumuloSecurityException(e.user, e.code, e);
  } catch (TException e) {
    throw new IOException(e);
  }
}
/**
 * Ensures a write session opened on a tablet server is cancelled if the write does not
 * complete, so stray writes cannot happen after the batch writer closes (see #3721). The
 * session id is set once startUpdate succeeds and cleared on success or on unretryable
 * errors; close() cancels any session that is still set.
 */
class SessionCloser implements AutoCloseable {

  private final String location;
  // session id of the in-flight update, or empty when there is nothing to cancel
  private OptionalLong usid;

  SessionCloser(String location) {
    this.location = location;
    usid = OptionalLong.empty();
  }

  void setSession(long usid) {
    this.usid = OptionalLong.of(usid);
  }

  public long getSession() {
    return usid.getAsLong();
  }

  void clearSession() {
    usid = OptionalLong.empty();
  }

  @Override
  public void close() throws ThriftSecurityException {
    // only cancel when a session was opened and never cleared (i.e. the write failed mid-way)
    if (usid.isPresent()) {
      try {
        cancelSession();
      } catch (InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }
  }

  /**
   * Checks if there is a lock held by a tserver at a specific host and port.
   */
  private boolean isALockHeld(String tserver) {
    var root = context.getZooKeeperRoot() + Constants.ZTSERVERS;
    var zLockPath = ServiceLock.path(root + "/" + tserver);
    return ServiceLock.getSessionId(context.getZooCache(), zLockPath) != 0;
  }

  /**
   * Retries cancelling the write session until it succeeds, the server disappears, the
   * session no longer exists, the writer fails for another reason, or the configured timeout
   * elapses. Falls back to closeUpdate for servers (2.1.2 and earlier) without cancelUpdate.
   */
  private void cancelSession() throws InterruptedException, ThriftSecurityException {

    Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
        .incrementBy(100, MILLISECONDS).maxWait(60, SECONDS).backOffFactor(1.5)
        .logInterval(3, MINUTES).createRetry();

    final HostAndPort parsedServer = HostAndPort.fromString(location);

    long startTime = System.nanoTime();

    // if the server does not have the cancelUpdate RPC, fall back to closeUpdate
    boolean useCloseUpdate = false;

    // If somethingFailed is true then the batch writer will throw an exception on close or
    // flush, so no need to close this session. Only want to close the session for retryable
    // exceptions.
    while (!somethingFailed.get()) {

      TabletIngestClientService.Client client = null;

      // Check if a lock is held by any tserver at the host and port. It does not need to be the
      // exact tserver instance that existed when the session was created because if a new
      // tserver instance comes up then the session will not exist there. Trying to get the
      // exact tserver instance that created the session would require changes to the RPC that
      // creates the session and this is not needed.
      if (!isALockHeld(location)) {
        retry.logCompletion(log,
            "No tserver for failed write session " + location + " " + usid);
        break;
      }

      try {
        if (timeout < context.getClientTimeoutInMillis()) {
          client = ThriftUtil.getClient(ThriftClientTypes.TABLET_INGEST, parsedServer, context,
              timeout);
        } else {
          client = ThriftUtil.getClient(ThriftClientTypes.TABLET_INGEST, parsedServer, context);
        }

        if (useCloseUpdate) {
          // This compatability handling for accumulo version 2.1.2 and earlier that did not
          // have cancelUpdate. Can remove this in 3.1.
          client.closeUpdate(TraceUtil.traceInfo(), usid.getAsLong());
          retry.logCompletion(log, "Closed failed write session " + location + " " + usid);
          break;
        } else {
          if (client.cancelUpdate(TraceUtil.traceInfo(), usid.getAsLong())) {
            retry.logCompletion(log, "Canceled failed write session " + location + " " + usid);
            break;
          } else {
            retry.waitForNextAttempt(log,
                "Attempting to cancel failed write session " + location + " " + usid);
          }
        }
      } catch (NoSuchScanIDException e) {
        retry.logCompletion(log,
            "Failed write session no longer exists " + location + " " + usid);
        // The session no longer exists, so done
        break;
      } catch (TApplicationException tae) {
        if (tae.getType() == TApplicationException.UNKNOWN_METHOD && !useCloseUpdate) {
          useCloseUpdate = true;
          log.debug(
              "Accumulo server {} does not have cancelUpdate, falling back to closeUpdate.",
              location);
          retry.waitForNextAttempt(log, "Attempting to cancel failed write session " + location
              + " " + usid + " " + tae.getMessage());
        } else {
          // no need to bother closing session in this case
          updateServerErrors(location, tae);
          break;
        }
      } catch (ThriftSecurityException e) {
        throw e;
      } catch (TException e) {
        String op = useCloseUpdate ? "close" : "cancel";
        retry.waitForNextAttempt(log, "Attempting to " + op + " failed write session "
            + location + " " + usid + " " + e.getMessage());
      } finally {
        ThriftUtil.returnClient(client, context);
      }

      // if a timeout is set on the batch writer, then do not retry longer than the timeout
      if (TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) > timeout) {
        log.debug("Giving up on canceling session {} {} and timing out.", location, usid);
        throw new TimedOutException(Set.of(location));
      }
    }
  }
}
}
// END code for sending mutations to tablet servers using background threads
/**
 * Accumulates mutations grouped by table along with a running estimate of the memory they
 * consume. Not thread safe; callers provide external synchronization.
 */
private static class MutationSet {

  private final HashMap<TableId,List<Mutation>> mutations;
  private long memoryUsed = 0;

  MutationSet() {
    mutations = new HashMap<>();
  }

  void addMutation(TableId table, Mutation mutation) {
    List<Mutation> tableMutations = mutations.computeIfAbsent(table, unused -> new ArrayList<>());
    tableMutations.add(mutation);
    memoryUsed += mutation.estimatedMemoryUsed();
  }

  Map<TableId,List<Mutation>> getMutations() {
    return mutations;
  }

  /** @return the total number of mutations across all tables */
  int size() {
    return mutations.values().stream().mapToInt(List::size).sum();
  }

  public void addAll(MutationSet failures) {
    // route every (table, mutations) pair through the per-table overload
    failures.getMutations().forEach(this::addAll);
  }

  public void addAll(TableId table, List<Mutation> toAdd) {
    for (Mutation mutation : toAdd) {
      addMutation(table, mutation);
    }
  }

  public long getMemoryUsed() {
    return memoryUsed;
  }
}
}
| 9,850 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ScanServerAttemptsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.stream.Collectors.toUnmodifiableMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.spi.scan.ScanServerAttempt;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Records the scan attempts made against scan servers so a ScanServerSelector can consult
 * them. Results may be reported concurrently (useful for the batch scanner). Snapshots are
 * immutable once taken, so plugin authors never see a snapshot change underneath them even
 * though this object keeps accepting new results afterwards.
 */
public class ScanServerAttemptsImpl {

  private static final Logger LOG = LoggerFactory.getLogger(ScanServerAttemptsImpl.class);

  private final Map<TabletId,Collection<ScanServerAttemptImpl>> attempts = new HashMap<>();

  ScanServerAttemptReporter createReporter(String server, TabletId tablet) {
    return result -> {
      LOG.trace("Received result: {}", result);
      synchronized (attempts) {
        Collection<ScanServerAttemptImpl> tabletAttempts =
            attempts.computeIfAbsent(tablet, unused -> new ArrayList<>());
        tabletAttempts.add(new ScanServerAttemptImpl(result, server));
      }
    };
  }

  /**
   * Creates and returns a snapshot of {@link ScanServerAttempt} objects that were added before
   * this call
   *
   * @return TabletIds mapped to an immutable collection of {@link ScanServerAttempt} objects
   *         associated with that TabletId
   */
  Map<TabletId,Collection<ScanServerAttemptImpl>> snapshot() {
    synchronized (attempts) {
      Map<TabletId,Collection<ScanServerAttemptImpl>> copy = new HashMap<>();
      attempts.forEach((tablet, list) -> copy.put(tablet, List.copyOf(list)));
      return Map.copyOf(copy);
    }
  }
}
| 9,851 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ThriftTransportPool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.LongSupplier;
import java.util.function.Supplier;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.threads.Threads;
import org.apache.thrift.TConfiguration;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.net.HostAndPort;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class ThriftTransportPool {
  private static final Logger log = LoggerFactory.getLogger(ThriftTransportPool.class);
  // number of errors from a single server before it is warned about (once)
  private static final long ERROR_THRESHOLD = 20L;
  // how long an in-flight I/O operation may appear unchanged before it is reported as stuck
  private static final long STUCK_THRESHOLD = MINUTES.toMillis(2);
  private final ConnectionPool connectionPool = new ConnectionPool();
  // error bookkeeping below is guarded by synchronizing on errorCount
  private final Map<ThriftTransportKey,Long> errorCount = new HashMap<>();
  private final Map<ThriftTransportKey,Long> errorTime = new HashMap<>();
  private final Set<ThriftTransportKey> serversWarnedAbout = new HashSet<>();
  // background thread that periodically closes idle connections and checks for stuck I/O
  private final Thread checkThread;
  // supplier so the max idle age is re-read each checker cycle rather than captured once
  private final LongSupplier maxAgeMillis;
  /**
   * Creates the pool and builds (but does not start) the background checker thread. The checker
   * calls {@link #closeExpiredConnections()} on a period derived from the configured max idle
   * age, clamped between 250ms and 1 minute.
   *
   * @param maxAgeMillis supplies the max idle age for pooled connections; re-read every cycle
   */
  private ThriftTransportPool(LongSupplier maxAgeMillis) {
    this.maxAgeMillis = maxAgeMillis;
    this.checkThread = Threads.createThread("Thrift Connection Pool Checker", () -> {
      try {
        final long minNanos = MILLISECONDS.toNanos(250);
        final long maxNanos = MINUTES.toNanos(1);
        long lastRun = System.nanoTime();
        while (!connectionPool.shutdown) {
          // don't close on every loop; instead, check based on configured max age, within bounds
          var threshold = Math.min(maxNanos,
              Math.max(minNanos, MILLISECONDS.toNanos(maxAgeMillis.getAsLong()) / 2));
          long currentNanos = System.nanoTime();
          if ((currentNanos - lastRun) >= threshold) {
            closeExpiredConnections();
            lastRun = currentNanos;
          }
          // loop often, to detect shutdowns quickly
          Thread.sleep(250);
        }
      } catch (InterruptedException e) {
        // checker is exiting; restore the interrupt status for any joining code
        Thread.currentThread().interrupt();
      } catch (TransportPoolShutdownException e) {
        log.debug("Error closing expired connections", e);
      }
    });
  }
/**
* Create a new instance and start its checker thread, returning the instance.
*
* @param maxAgeMillis the supplier for the max age of idle transports before they are cleaned up
* @return a new instance with its checker thread started to clean up idle transports
*/
static ThriftTransportPool startNew(LongSupplier maxAgeMillis) {
var pool = new ThriftTransportPool(maxAgeMillis);
log.debug("Set thrift transport pool idle time to {}ms", maxAgeMillis.getAsLong());
pool.checkThread.start();
return pool;
}
public TTransport getTransport(HostAndPort location, long milliseconds, ClientContext context)
throws TTransportException {
ThriftTransportKey cacheKey = new ThriftTransportKey(location, milliseconds, context);
CachedConnection connection = connectionPool.reserveAny(cacheKey);
if (connection != null) {
log.trace("Using existing connection to {}", cacheKey.getServer());
return connection.transport;
} else {
return createNewTransport(cacheKey);
}
}
  /**
   * Obtains a transport to any one of the given servers. When {@code preferCachedConnection} is
   * true, cached connections are tried first (in random order); otherwise servers are picked at
   * random and new connections are created, removing unreachable servers from the candidate list.
   * At most 10 connection failures are tolerated before giving up.
   *
   * @param servers candidate servers; not mutated (a defensive copy is made)
   * @param preferCachedConnection whether to try pooled connections before creating new ones
   * @return a pair of the chosen server's address string and a reserved transport to it
   * @throws TTransportException if no server could be connected to
   */
  @VisibleForTesting
  public Pair<String,TTransport> getAnyTransport(List<ThriftTransportKey> servers,
      boolean preferCachedConnection) throws TTransportException {
    servers = new ArrayList<>(servers);
    if (preferCachedConnection) {
      HashSet<ThriftTransportKey> serversSet = new HashSet<>(servers);
      // randomly pick a server from the connection cache
      serversSet.retainAll(connectionPool.getThriftTransportKeys());
      if (!serversSet.isEmpty()) {
        ArrayList<ThriftTransportKey> cachedServers = new ArrayList<>(serversSet);
        Collections.shuffle(cachedServers, RANDOM.get());
        for (ThriftTransportKey ttk : cachedServers) {
          CachedConnection connection = connectionPool.reserveAny(ttk);
          if (connection != null) {
            final String serverAddr = ttk.getServer().toString();
            log.trace("Using existing connection to {}", serverAddr);
            return new Pair<>(serverAddr, connection.transport);
          }
        }
      }
    }
    int retryCount = 0;
    while (!servers.isEmpty() && retryCount < 10) {
      int index = RANDOM.get().nextInt(servers.size());
      ThriftTransportKey ttk = servers.get(index);
      if (preferCachedConnection) {
        // a cached connection may have appeared since the shuffle above; try again before dialing
        CachedConnection connection = connectionPool.reserveAnyIfPresent(ttk);
        if (connection != null) {
          return new Pair<>(ttk.getServer().toString(), connection.transport);
        }
      }
      try {
        return new Pair<>(ttk.getServer().toString(), createNewTransport(ttk));
      } catch (TTransportException tte) {
        log.debug("Failed to connect to {}", servers.get(index), tte);
        servers.remove(index);
        retryCount++;
      }
    }
    throw new TTransportException("Failed to connect to a server");
  }
private TTransport createNewTransport(ThriftTransportKey cacheKey) throws TTransportException {
TTransport transport = ThriftUtil.createClientTransport(cacheKey.getServer(),
(int) cacheKey.getTimeout(), cacheKey.getSslParams(), cacheKey.getSaslParams());
log.trace("Creating new connection to connection to {}", cacheKey.getServer());
CachedTTransport tsc = new CachedTTransport(transport, cacheKey);
CachedConnection connection = new CachedConnection(tsc);
connection.reserve();
try {
connectionPool.putReserved(cacheKey, connection);
} catch (TransportPoolShutdownException e) {
connection.transport.close();
throw e;
}
return connection.transport;
}
  /**
   * Returns a transport obtained from this pool. If the transport saw an error, it and all other
   * unreserved connections to the same server are closed rather than cached, and the per-server
   * error count is updated, warning once when a server exceeds {@code ERROR_THRESHOLD}.
   *
   * @param transport a transport previously handed out by this pool; null is a no-op
   */
  public void returnTransport(TTransport transport) {
    if (transport == null) {
      return;
    }
    CachedTTransport cachedTransport = (CachedTTransport) transport;
    ArrayList<CachedConnection> closeList = new ArrayList<>();
    boolean existInCache = connectionPool.returnTransport(cachedTransport, closeList);
    // close outside of sync block
    closeList.forEach(connection -> {
      try {
        connection.transport.close();
      } catch (Exception e) {
        log.debug("Failed to close connection w/ errors", e);
      }
    });
    if (cachedTransport.sawError) {
      boolean shouldWarn = false;
      long ecount;
      synchronized (errorCount) {
        ecount = errorCount.merge(cachedTransport.getCacheKey(), 1L, Long::sum);
        // logs the first time an error occurred
        errorTime.computeIfAbsent(cachedTransport.getCacheKey(), k -> System.currentTimeMillis());
        if (ecount >= ERROR_THRESHOLD && serversWarnedAbout.add(cachedTransport.getCacheKey())) {
          // boolean facilitates logging outside of lock
          shouldWarn = true;
        }
      }
      log.trace("Returned connection had error {}", cachedTransport.getCacheKey());
      if (shouldWarn) {
        log.warn("Server {} had {} failures in a short time period, will not complain anymore",
            cachedTransport.getCacheKey(), ecount);
      }
    }
    if (!existInCache) {
      log.warn("Returned tablet server connection to cache that did not come from cache");
      // close outside of sync block
      transport.close();
    }
  }
private void closeExpiredConnections() {
List<CachedConnection> expiredConnections;
expiredConnections = connectionPool.removeExpiredConnections(maxAgeMillis);
synchronized (errorCount) {
Iterator<Entry<ThriftTransportKey,Long>> iter = errorTime.entrySet().iterator();
while (iter.hasNext()) {
Entry<ThriftTransportKey,Long> entry = iter.next();
long delta = System.currentTimeMillis() - entry.getValue();
if (delta >= STUCK_THRESHOLD) {
errorCount.remove(entry.getKey());
iter.remove();
}
}
}
// Close connections outside of sync block
expiredConnections.forEach(c -> c.transport.close());
}
void shutdown() {
connectionPool.shutdown();
try {
checkThread.join();
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
}
// INNER classes below here
  /** Per-server bookkeeping: an MRU deque of idle connections plus a map of reserved ones. */
  private static class CachedConnections {
    /*
     * Items are added and removed from this queue in such a way that the queue is ordered from
     * most recently used to least recently used. The first position being the most recently used
     * and the last position being the least recently used. This is done in the following way.
     *
     * - Newly unreserved connections are added using addFirst(). When a connection is added, its
     * lastReturnTime is set.
     *
     * - When an unreserved connection is needed, it is taken off using pollFirst().
     *
     * - Unreserved connections that have been idle too long are removed using removeLast()
     *
     * The purpose of maintaining this ordering is to allow efficient removal of idle connections.
     * The efficiency comes from avoiding a linear search for idle connections. Since this search
     * is done by a background thread holding a lock, that's good for any thread attempting to
     * reserve a connection.
     */
    Deque<CachedConnection> unreserved = new ArrayDeque<>(); // stack - LIFO
    Map<CachedTTransport,CachedConnection> reserved = new HashMap<>();
    // Pops the most recently used idle connection, marks it reserved, and returns it; null when
    // no idle connection is available.
    public CachedConnection reserveAny() {
      CachedConnection cachedConnection = unreserved.pollFirst(); // safe pop
      if (cachedConnection != null) {
        cachedConnection.reserve();
        reserved.put(cachedConnection.transport, cachedConnection);
        if (log.isTraceEnabled()) {
          log.trace("Using existing connection to {}", cachedConnection.transport.cacheKey);
        }
      }
      return cachedConnection;
    }
    // Moves connections idle longer than maxAgeMillis from the tail of the deque into expired.
    private void removeExpiredConnections(final ArrayList<CachedConnection> expired,
        final LongSupplier maxAgeMillis) {
      long currTime = System.currentTimeMillis();
      while (isLastUnreservedExpired(currTime, maxAgeMillis)) {
        expired.add(unreserved.removeLast());
      }
    }
    boolean isLastUnreservedExpired(final long currTime, final LongSupplier maxAgeMillis) {
      return !unreserved.isEmpty()
          && (currTime - unreserved.peekLast().lastReturnTime) > maxAgeMillis.getAsLong();
    }
    // Asks every reserved connection's transport to report I/O stuck longer than the threshold.
    void checkReservedForStuckIO() {
      reserved.values().forEach(c -> c.transport.checkForStuckIO(STUCK_THRESHOLD));
    }
    void closeAllTransports() {
      closeTransports(unreserved);
      closeTransports(reserved.values());
    }
    // Best-effort close: failures are logged and do not stop the remaining closes.
    void closeTransports(final Iterable<CachedConnection> stream) {
      stream.forEach(connection -> {
        try {
          connection.transport.close();
        } catch (Exception e) {
          log.debug("Error closing transport during shutdown", e);
        }
      });
    }
    CachedConnection removeReserved(CachedTTransport transport) {
      return reserved.remove(transport);
    }
  }
private static class ConnectionPool {
final Lock[] locks;
final ConcurrentHashMap<ThriftTransportKey,CachedConnections> connections =
new ConcurrentHashMap<>();
private volatile boolean shutdown = false;
ConnectionPool() {
// intentionally using a prime number, don't use 31
locks = new Lock[37];
for (int i = 0; i < locks.length; i++) {
locks[i] = new ReentrantLock();
}
}
Set<ThriftTransportKey> getThriftTransportKeys() {
return connections.keySet();
}
/**
* Reserve and return a new {@link CachedConnection} from the {@link CachedConnections} mapped
* to the specified transport key. If a {@link CachedConnections} is not found, one will be
* created.
*
* <p>
*
* This operation locks access to the mapping for the key in {@link ConnectionPool#connections}
* until the operation completes.
*
* @param key the transport key
* @return the reserved {@link CachedConnection}
*/
CachedConnection reserveAny(final ThriftTransportKey key) {
// It's possible that multiple locks from executeWithinLock will overlap with a single lock
// inside the ConcurrentHashMap which can unnecessarily block threads. Access the
// ConcurrentHashMap outside of executeWithinLock to prevent this.
var connections = getOrCreateCachedConnections(key);
return executeWithinLock(key, connections::reserveAny);
}
/**
* Reserve and return a new {@link CachedConnection} from the {@link CachedConnections} mapped
* to the specified transport key. If a {@link CachedConnections} is not found, null will be
* returned.
*
* <p>
*
* This operation locks access to the mapping for the key in {@link ConnectionPool#connections}
* until the operation completes.
*
* @param key the transport key
* @return the reserved {@link CachedConnection}, or null if none were available.
*/
CachedConnection reserveAnyIfPresent(final ThriftTransportKey key) {
// It's possible that multiple locks from executeWithinLock will overlap with a single lock
// inside the ConcurrentHashMap which can unnecessarily block threads. Access the
// ConcurrentHashMap outside of executeWithinLock to prevent this.
var connections = getCachedConnections(key);
return connections == null ? null : executeWithinLock(key, connections::reserveAny);
}
/**
* Puts the specified connection into the reserved map of the {@link CachedConnections} for the
* specified transport key. If a {@link CachedConnections} is not found, one will be created.
*
* <p>
*
* This operation locks access to the mapping for the key in {@link ConnectionPool#connections}
* until the operation completes.
*
* @param key the transport key
* @param connection the reserved connection
*/
void putReserved(final ThriftTransportKey key, final CachedConnection connection) {
// It's possible that multiple locks from executeWithinLock will overlap with a single lock
// inside the ConcurrentHashMap which can unnecessarily block threads. Access the
// ConcurrentHashMap outside of executeWithinLock to prevent this.
var connections = getOrCreateCachedConnections(key);
executeWithinLock(key, () -> connections.reserved.put(connection.transport, connection));
}
/**
* Returns the connection for the specified transport back to the queue of unreserved
* connections for the {@link CachedConnections} for the specified transport's key. If a
* {@link CachedConnections} is not found, one will be created. If the transport saw an error,
* the connection for the transport will be unreserved, and it and all other unreserved
* connections will be added to the specified toBeClosed list, and the connections' unreserved
* list will be cleared.
*
* <p>
*
* This operation locks access to the mapping for the key in {@link ConnectionPool#connections}
* until the operation completes.
*
* @param transport the transport
* @param toBeClosed the list to add connections that must be closed after this operation
* finishes
* @return true if the connection for the transport existed and was initially reserved, or false
* otherwise
*/
boolean returnTransport(final CachedTTransport transport,
final List<CachedConnection> toBeClosed) {
// It's possible that multiple locks from executeWithinLock will overlap with a single lock
// inside the ConcurrentHashMap which can unnecessarily block threads. Access the
// ConcurrentHashMap outside of executeWithinLock to prevent this.
var connections = getOrCreateCachedConnections(transport.getCacheKey());
return executeWithinLock(transport.getCacheKey(),
() -> unreserveConnection(transport, connections, toBeClosed)); // inline
}
@SuppressFBWarnings(value = "UL_UNRELEASED_LOCK",
justification = "FindBugs doesn't recognize that all locks in ConnectionPool.locks are subsequently unlocked in the try-finally in ConnectionPool.shutdown()")
void shutdown() {
// Obtain all locks.
for (Lock lock : locks) {
lock.lock();
}
// All locks are now acquired, so nothing else should be able to run concurrently...
try {
// Check if an shutdown has already been initiated.
if (shutdown) {
return;
}
shutdown = true;
connections.values().forEach(CachedConnections::closeAllTransports);
} finally {
for (Lock lock : locks) {
lock.unlock();
}
}
}
<T> T executeWithinLock(final ThriftTransportKey key, Supplier<T> function) {
Lock lock = getLock(key);
try {
return function.get();
} finally {
lock.unlock();
}
}
void executeWithinLock(final ThriftTransportKey key, Consumer<ThriftTransportKey> consumer) {
Lock lock = getLock(key);
try {
consumer.accept(key);
} finally {
lock.unlock();
}
}
Lock getLock(final ThriftTransportKey key) {
Lock lock = locks[(key.hashCode() & Integer.MAX_VALUE) % locks.length];
lock.lock();
if (shutdown) {
lock.unlock();
throw new TransportPoolShutdownException(
"The Accumulo singleton for connection pooling is disabled. This is likely caused by "
+ "all AccumuloClients being closed or garbage collected.");
}
return lock;
}
CachedConnections getCachedConnections(final ThriftTransportKey key) {
return connections.get(key);
}
CachedConnections getOrCreateCachedConnections(final ThriftTransportKey key) {
return connections.computeIfAbsent(key, k -> new CachedConnections());
}
boolean unreserveConnection(final CachedTTransport transport,
final CachedConnections connections, final List<CachedConnection> toBeClosed) {
if (connections != null) {
CachedConnection connection = connections.removeReserved(transport);
if (connection != null) {
if (transport.sawError) {
unreserveConnectionAndClearUnreserved(connections, connection, toBeClosed);
} else {
returnConnectionToUnreserved(connections, connection);
}
return true;
}
}
return false;
}
void unreserveConnectionAndClearUnreserved(final CachedConnections connections,
final CachedConnection connection, final List<CachedConnection> toBeClosed) {
toBeClosed.add(connection);
connection.unreserve();
// Remove all unreserved cached connection when a sever has an error, not just the
// connection that was returned.
toBeClosed.addAll(connections.unreserved);
connections.unreserved.clear();
}
void returnConnectionToUnreserved(final CachedConnections connections,
final CachedConnection connection) {
log.trace("Returned connection {} ioCount: {}", connection.transport.getCacheKey(),
connection.transport.ioCount);
connection.lastReturnTime = System.currentTimeMillis();
connection.unreserve();
// Using LIFO ensures that when the number of pooled connections exceeds the working
// set size that the idle times at the end of the list grow. The connections with
// large idle times will be cleaned up. Using a FIFO could continually reset the idle
// times of all connections, even when there are more than the working set size.
connections.unreserved.addFirst(connection);
}
List<CachedConnection> removeExpiredConnections(final LongSupplier maxAgeMillis) {
ArrayList<CachedConnection> expired = new ArrayList<>();
for (Entry<ThriftTransportKey,CachedConnections> entry : connections.entrySet()) {
CachedConnections connections = entry.getValue();
executeWithinLock(entry.getKey(), key -> {
connections.removeExpiredConnections(expired, maxAgeMillis);
connections.checkReservedForStuckIO();
});
}
return expired;
}
}
private static class CachedConnection {
public CachedConnection(CachedTTransport t) {
this.transport = t;
}
void reserve() {
Preconditions.checkState(!this.transport.reserved);
this.transport.setReserved(true);
}
void unreserve() {
Preconditions.checkState(this.transport.reserved);
this.transport.setReserved(false);
}
final CachedTTransport transport;
long lastReturnTime;
}
public static class TransportPoolShutdownException extends RuntimeException {
public TransportPoolShutdownException(String msg) {
super(msg);
}
private static final long serialVersionUID = 1L;
}
  /**
   * A {@link TTransport} wrapper that carries the pool cache key, remembers whether the transport
   * ever threw a {@link TTransportException}, and tracks I/O activity for stuck-I/O detection.
   * The tracking protocol: {@code ioCount} is incremented before and after every wrapped
   * operation, so an odd value means an operation is currently in progress, and an unchanged odd
   * value across checks means that operation has not completed.
   */
  private static class CachedTTransport extends TTransport {
    private final ThriftTransportKey cacheKey;
    private final TTransport wrappedTransport;
    private boolean sawError = false;
    private volatile String ioThreadName = null;
    private volatile long ioStartTime = 0;
    private volatile boolean reserved = false;
    private String stuckThreadName = null;
    int ioCount = 0;
    int lastIoCount = -1;
    private void sawError() {
      sawError = true;
    }
    // Toggles the reserved flag and resets the I/O tracking state; warns if the connection is
    // unreserved while ioCount is odd (an operation appears to still be in flight).
    final void setReserved(boolean reserved) {
      this.reserved = reserved;
      if (reserved) {
        ioThreadName = Thread.currentThread().getName();
        ioCount = 0;
        lastIoCount = -1;
      } else {
        if ((ioCount & 1) == 1) {
          // connection unreserved, but it seems io may still be happening
          log.warn("Connection returned to thrift connection pool that may still be in use {} {}",
              ioThreadName, Thread.currentThread().getName(), new Exception());
        }
        ioCount = 0;
        lastIoCount = -1;
        ioThreadName = null;
      }
      checkForStuckIO(STUCK_THRESHOLD);
    }
    // Called both when reservation state changes and periodically by the pool's checker thread;
    // logs a warning when the same I/O operation has been in flight longer than threshold ms.
    final void checkForStuckIO(long threshold) {
      // checking for stuck io needs to be light weight.
      // Tried to call System.currentTimeMillis() and Thread.currentThread() before every io
      // operation.... this dramatically slowed things down. So switched to
      // incrementing a counter before and after each io operation.
      if ((ioCount & 1) == 1) {
        // when ioCount is odd, it means I/O is currently happening
        if (ioCount == lastIoCount) {
          // still doing same I/O operation as last time this
          // functions was called
          long delta = System.currentTimeMillis() - ioStartTime;
          if (delta >= threshold && stuckThreadName == null) {
            stuckThreadName = ioThreadName;
            log.warn("Thread \"{}\" stuck on IO to {} for at least {} ms", ioThreadName, cacheKey,
                delta);
          }
        } else {
          // remember this ioCount and the time we saw it, need to see
          // if it changes
          lastIoCount = ioCount;
          ioStartTime = System.currentTimeMillis();
          if (stuckThreadName != null) {
            // doing I/O, but ioCount changed so no longer stuck
            log.info("Thread \"{}\" no longer stuck on IO to {} sawError = {}", stuckThreadName,
                cacheKey, sawError);
            stuckThreadName = null;
          }
        }
      } else {
        // I/O is not currently happening
        if (stuckThreadName != null) {
          // no longer stuck, and was stuck in the past
          log.info("Thread \"{}\" no longer stuck on IO to {} sawError = {}", stuckThreadName,
              cacheKey, sawError);
          stuckThreadName = null;
        }
      }
    }
    public CachedTTransport(TTransport transport, ThriftTransportKey cacheKey2) {
      this.wrappedTransport = transport;
      this.cacheKey = cacheKey2;
    }
    @Override
    public boolean isOpen() {
      return wrappedTransport.isOpen();
    }
    // Every wrapper below brackets the delegated call with ioCount++ (see class comment) and,
    // for operations that can throw TTransportException, records the failure via sawError().
    @Override
    public void open() throws TTransportException {
      try {
        ioCount++;
        wrappedTransport.open();
      } catch (TTransportException tte) {
        sawError();
        throw tte;
      } finally {
        ioCount++;
      }
    }
    @Override
    public int read(byte[] arg0, int arg1, int arg2) throws TTransportException {
      try {
        ioCount++;
        return wrappedTransport.read(arg0, arg1, arg2);
      } catch (TTransportException tte) {
        sawError();
        throw tte;
      } finally {
        ioCount++;
      }
    }
    @Override
    public int readAll(byte[] arg0, int arg1, int arg2) throws TTransportException {
      try {
        ioCount++;
        return wrappedTransport.readAll(arg0, arg1, arg2);
      } catch (TTransportException tte) {
        sawError();
        throw tte;
      } finally {
        ioCount++;
      }
    }
    @Override
    public void write(byte[] arg0, int arg1, int arg2) throws TTransportException {
      try {
        ioCount++;
        wrappedTransport.write(arg0, arg1, arg2);
      } catch (TTransportException tte) {
        sawError();
        throw tte;
      } finally {
        ioCount++;
      }
    }
    @Override
    public void write(byte[] arg0) throws TTransportException {
      try {
        ioCount++;
        wrappedTransport.write(arg0);
      } catch (TTransportException tte) {
        sawError();
        throw tte;
      } finally {
        ioCount++;
      }
    }
    @Override
    public void close() {
      try {
        ioCount++;
        if (wrappedTransport.isOpen()) {
          wrappedTransport.close();
        }
      } finally {
        ioCount++;
      }
    }
    @Override
    public void flush() throws TTransportException {
      try {
        ioCount++;
        wrappedTransport.flush();
      } catch (TTransportException tte) {
        sawError();
        throw tte;
      } finally {
        ioCount++;
      }
    }
    @Override
    public boolean peek() {
      try {
        ioCount++;
        return wrappedTransport.peek();
      } finally {
        ioCount++;
      }
    }
    @Override
    public byte[] getBuffer() {
      try {
        ioCount++;
        return wrappedTransport.getBuffer();
      } finally {
        ioCount++;
      }
    }
    @Override
    public int getBufferPosition() {
      try {
        ioCount++;
        return wrappedTransport.getBufferPosition();
      } finally {
        ioCount++;
      }
    }
    @Override
    public int getBytesRemainingInBuffer() {
      try {
        ioCount++;
        return wrappedTransport.getBytesRemainingInBuffer();
      } finally {
        ioCount++;
      }
    }
    @Override
    public void consumeBuffer(int len) {
      try {
        ioCount++;
        wrappedTransport.consumeBuffer(len);
      } finally {
        ioCount++;
      }
    }
    @Override
    public TConfiguration getConfiguration() {
      return wrappedTransport.getConfiguration();
    }
    @Override
    public void updateKnownMessageSize(long size) throws TTransportException {
      try {
        ioCount++;
        wrappedTransport.updateKnownMessageSize(size);
      } finally {
        ioCount++;
      }
    }
    @Override
    public void checkReadBytesAvailable(long numBytes) throws TTransportException {
      try {
        ioCount++;
        wrappedTransport.checkReadBytesAvailable(numBytes);
      } finally {
        ioCount++;
      }
    }
    public ThriftTransportKey getCacheKey() {
      return cacheKey;
    }
  }
}
| 9,852 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientInfoImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.hadoop.conf.Configuration;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * Default {@link ClientInfo} implementation backed by a {@link Properties} object, optionally
 * loaded from a file path or URL.
 */
public class ClientInfoImpl implements ClientInfo {
  private final Properties properties;
  // lazily initialized from properties on first use by getAuthenticationToken(); access is not
  // synchronized — NOTE(review): assumes single-threaded use during setup, confirm with callers
  private AuthenticationToken token;
  private final Configuration hadoopConf;

  public ClientInfoImpl(Path propertiesFile) {
    this(ClientInfoImpl.toProperties(propertiesFile));
  }

  public ClientInfoImpl(URL propertiesURL) {
    this(ClientInfoImpl.toProperties(propertiesURL));
  }

  public ClientInfoImpl(Properties properties) {
    this(properties, null);
  }

  /**
   * @param properties client configuration properties
   * @param token an explicit authentication token, or null to derive one from the properties on
   *        first request
   */
  public ClientInfoImpl(Properties properties, AuthenticationToken token) {
    this.properties = properties;
    this.token = token;
    this.hadoopConf = new Configuration();
  }

  @Override
  public String getInstanceName() {
    return getString(ClientProperty.INSTANCE_NAME);
  }

  @Override
  public String getZooKeepers() {
    return getString(ClientProperty.INSTANCE_ZOOKEEPERS);
  }

  @Override
  public int getZooKeepersSessionTimeOut() {
    return (int) ConfigurationTypeHelper
        .getTimeInMillis(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getValue(properties));
  }

  @Override
  public String getPrincipal() {
    return getString(ClientProperty.AUTH_PRINCIPAL);
  }

  @Override
  public Properties getProperties() {
    // defensive copy so callers cannot mutate this instance's configuration
    Properties result = new Properties();
    properties.forEach((key, value) -> result.setProperty((String) key, (String) value));
    return result;
  }

  @Override
  public AuthenticationToken getAuthenticationToken() {
    if (token == null) {
      token = ClientProperty.getAuthenticationToken(properties);
    }
    return token;
  }

  @Override
  public boolean saslEnabled() {
    // parseBoolean avoids the needless boxing of Boolean.valueOf and behaves identically
    return Boolean.parseBoolean(getString(ClientProperty.SASL_ENABLED));
  }

  private String getString(ClientProperty property) {
    return property.getValue(properties);
  }

  /**
   * Loads client properties from the given file path.
   *
   * @throws IllegalArgumentException if the file cannot be read or parsed
   */
  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "code runs in same security context as user who provided propertiesFilePath")
  public static Properties toProperties(String propertiesFilePath) {
    return toProperties(Paths.get(propertiesFilePath));
  }

  /**
   * Loads client properties from the given file.
   *
   * @throws IllegalArgumentException if the file cannot be read or parsed
   */
  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "code runs in same security context as user who provided propertiesFile")
  public static Properties toProperties(Path propertiesFile) {
    Properties properties = new Properties();
    try (InputStream is = new FileInputStream(propertiesFile.toFile())) {
      properties.load(is);
    } catch (IOException e) {
      throw new IllegalArgumentException("Failed to load properties from " + propertiesFile, e);
    }
    return properties;
  }

  /**
   * Loads client properties from the given URL.
   *
   * @throws IllegalArgumentException if the stream cannot be read or parsed
   */
  @SuppressFBWarnings(value = "URLCONNECTION_SSRF_FD",
      justification = "code runs in same security context as user who provided propertiesURL")
  public static Properties toProperties(URL propertiesURL) {
    Properties properties = new Properties();
    try (InputStream is = propertiesURL.openStream()) {
      properties.load(is);
    } catch (IOException e) {
      throw new IllegalArgumentException("Failed to load properties from " + propertiesURL, e);
    }
    return properties;
  }

  @Override
  public Configuration getHadoopConf() {
    return this.hadoopConf;
  }
}
| 9,853 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/Namespaces.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.BiConsumer;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.data.NamespaceId;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.util.tables.TableNameUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client-side helpers for resolving Accumulo namespace names and ids via the ZooKeeper cache.
 */
public class Namespaces {
  private static final Logger log = LoggerFactory.getLogger(Namespaces.class);
public static boolean exists(ClientContext context, NamespaceId namespaceId) {
ZooCache zc = context.getZooCache();
List<String> namespaceIds = zc.getChildren(context.getZooKeeperRoot() + Constants.ZNAMESPACES);
return namespaceIds.contains(namespaceId.canonical());
}
public static List<TableId> getTableIds(ClientContext context, NamespaceId namespaceId)
throws NamespaceNotFoundException {
String namespace = getNamespaceName(context, namespaceId);
List<TableId> tableIds = new LinkedList<>();
for (Entry<String,TableId> nameToId : context.getTableNameToIdMap().entrySet()) {
if (namespace.equals(TableNameUtil.qualify(nameToId.getKey()).getFirst())) {
tableIds.add(nameToId.getValue());
}
}
return tableIds;
}
public static List<String> getTableNames(ClientContext context, NamespaceId namespaceId)
throws NamespaceNotFoundException {
String namespace = getNamespaceName(context, namespaceId);
List<String> names = new LinkedList<>();
for (String name : context.getTableNameToIdMap().keySet()) {
if (namespace.equals(TableNameUtil.qualify(name).getFirst())) {
names.add(name);
}
}
return names;
}
/**
* Gets all the namespaces from ZK. The first arg (t) the BiConsumer accepts is the ID and the
* second (u) is the namespaceName.
*/
private static void getAllNamespaces(ClientContext context,
BiConsumer<String,String> biConsumer) {
final ZooCache zc = context.getZooCache();
List<String> namespaceIds = zc.getChildren(context.getZooKeeperRoot() + Constants.ZNAMESPACES);
for (String id : namespaceIds) {
byte[] path = zc.get(context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + id
+ Constants.ZNAMESPACE_NAME);
if (path != null) {
biConsumer.accept(id, new String(path, UTF_8));
}
}
}
/**
* Return sorted map with key = ID, value = namespaceName
*/
public static SortedMap<NamespaceId,String> getIdToNameMap(ClientContext context) {
SortedMap<NamespaceId,String> idMap = new TreeMap<>();
getAllNamespaces(context, (id, name) -> idMap.put(NamespaceId.of(id), name));
return idMap;
}
/**
* Return sorted map with key = namespaceName, value = ID
*/
public static SortedMap<String,NamespaceId> getNameToIdMap(ClientContext context) {
SortedMap<String,NamespaceId> nameMap = new TreeMap<>();
getAllNamespaces(context, (id, name) -> nameMap.put(name, NamespaceId.of(id)));
return nameMap;
}
/**
* Look for namespace ID in ZK. Throw NamespaceNotFoundException if not found.
*/
public static NamespaceId getNamespaceId(ClientContext context, String namespaceName)
throws NamespaceNotFoundException {
final ArrayList<NamespaceId> singleId = new ArrayList<>(1);
getAllNamespaces(context, (id, name) -> {
if (name.equals(namespaceName)) {
singleId.add(NamespaceId.of(id));
}
});
if (singleId.isEmpty()) {
throw new NamespaceNotFoundException(null, namespaceName,
"getNamespaceId() failed to find namespace");
}
return singleId.get(0);
}
/**
* Look for namespace ID in ZK. Fail quietly by logging and returning null.
*/
public static NamespaceId lookupNamespaceId(ClientContext context, String namespaceName) {
NamespaceId id = null;
try {
id = getNamespaceId(context, namespaceName);
} catch (NamespaceNotFoundException e) {
if (log.isDebugEnabled()) {
log.debug("Failed to find namespace ID from name: " + namespaceName, e);
}
}
return id;
}
/**
* Return true if namespace name exists
*/
public static boolean namespaceNameExists(ClientContext context, String namespaceName) {
return lookupNamespaceId(context, namespaceName) != null;
}
/**
* Look for namespace name in ZK. Throw NamespaceNotFoundException if not found.
*/
public static String getNamespaceName(ClientContext context, NamespaceId namespaceId)
throws NamespaceNotFoundException {
String name;
ZooCache zc = context.getZooCache();
byte[] path = zc.get(context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/"
+ namespaceId.canonical() + Constants.ZNAMESPACE_NAME);
if (path != null) {
name = new String(path, UTF_8);
} else {
throw new NamespaceNotFoundException(namespaceId.canonical(), null,
"getNamespaceName() failed to find namespace");
}
return name;
}
}
| 9,854 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TabletServerBatchDeleter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.BatchDeleter;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
/**
 * A {@link BatchDeleter} that scans for matching entries using the inherited batch reader and then
 * writes a delete mutation for every key returned, preserving each key's visibility and timestamp.
 * A {@link SortedKeyIterator} is attached so only keys (no values) are transferred from the
 * servers.
 */
public class TabletServerBatchDeleter extends TabletServerBatchReader implements BatchDeleter {

  // all three are set once in the constructor and never reassigned
  private final ClientContext context;
  private final TableId tableId;
  private final BatchWriterConfig bwConfig;

  public TabletServerBatchDeleter(ClientContext context, TableId tableId, String tableName,
      Authorizations authorizations, int numQueryThreads, BatchWriterConfig bwConfig) {
    super(context, BatchDeleter.class, tableId, tableName, authorizations, numQueryThreads);
    this.context = context;
    this.tableId = tableId;
    this.bwConfig = bwConfig;
    // strip values server-side; deletes only need the keys. Max priority so it runs last.
    super.addScanIterator(new IteratorSetting(Integer.MAX_VALUE,
        BatchDeleter.class.getName().replaceAll("[.]", "_") + "_NOVALUE", SortedKeyIterator.class));
  }

  /**
   * Scans the configured ranges and writes a delete for every key found.
   *
   * @throws MutationsRejectedException if any delete mutation is rejected by the servers
   */
  @Override
  public void delete() throws MutationsRejectedException {
    // try-with-resources flushes and closes the writer even if iteration fails
    try (BatchWriter bw = new BatchWriterImpl(context, tableId, bwConfig)) {
      Iterator<Entry<Key,Value>> iter = super.iterator();
      while (iter.hasNext()) {
        Entry<Key,Value> next = iter.next();
        Key k = next.getKey();
        Mutation m = new Mutation(k.getRow());
        // preserve visibility and timestamp so the delete masks exactly the scanned entry
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier(),
            new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp());
        bw.addMutation(m);
      }
    }
  }
}
| 9,855 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ScanServerAttemptReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.spi.scan.ScanServerAttempt;
/**
 * Callback used internally to report the outcome of an attempt to use a scan server, so that
 * subsequent scan-server selection can take past results into account.
 */
interface ScanServerAttemptReporter {
  // invoked once per attempt with the result of contacting a scan server
  void report(ScanServerAttempt.Result result);
}
| 9,856 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/CompressedIterators.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.util.UnsynchronizedBuffer;
/**
 * Compresses and decompresses iterator configurations by replacing repeated strings (iterator
 * names, class names, option keys/values) with small integer IDs into a shared symbol table.
 * Instances are not thread-safe.
 */
public class CompressedIterators {
  private final Map<String,Integer> symbolMap;
  private final List<String> symbolTable;

  public static class IterConfig {
    public List<IterInfo> ssiList = new ArrayList<>();
    public Map<String,Map<String,String>> ssio = new HashMap<>();
  }

  public CompressedIterators() {
    symbolMap = new HashMap<>();
    symbolTable = new ArrayList<>();
  }

  /**
   * Creates an instance backed by an existing symbol table (e.g. one received over the wire).
   *
   * <p>
   * Previously this constructor left {@code symbolMap} null, so calling {@link #compress} on such
   * an instance threw a {@link NullPointerException}; the map is now built from the given symbols
   * so both directions work.
   */
  public CompressedIterators(List<String> symbols) {
    this.symbolTable = symbols;
    this.symbolMap = new HashMap<>();
    for (int i = 0; i < symbols.size(); i++) {
      symbolMap.put(symbols.get(i), i);
    }
  }

  // returns the existing ID for the symbol, assigning the next ID if it is new
  private int getSymbolID(String symbol) {
    Integer id = symbolMap.get(symbol);
    if (id == null) {
      id = symbolTable.size();
      symbolTable.add(symbol);
      symbolMap.put(symbol, id);
    }
    return id;
  }

  /**
   * Encodes the iterator settings as variable-length ints referencing this instance's symbol
   * table. The symbol table itself must be transmitted separately via {@link #getSymbolTable()}.
   */
  public ByteBuffer compress(IteratorSetting[] iterators) {
    UnsynchronizedBuffer.Writer out = new UnsynchronizedBuffer.Writer(iterators.length * 8);
    out.writeVInt(iterators.length);
    for (IteratorSetting is : iterators) {
      out.writeVInt(getSymbolID(is.getName()));
      out.writeVInt(getSymbolID(is.getIteratorClass()));
      out.writeVInt(is.getPriority());
      Map<String,String> opts = is.getOptions();
      out.writeVInt(opts.size());
      for (Entry<String,String> entry : opts.entrySet()) {
        out.writeVInt(getSymbolID(entry.getKey()));
        out.writeVInt(getSymbolID(entry.getValue()));
      }
    }
    return out.toByteBuffer();
  }

  /**
   * Decodes a buffer produced by {@link #compress}, resolving symbol IDs against this instance's
   * symbol table.
   */
  public IterConfig decompress(ByteBuffer iterators) {
    IterConfig config = new IterConfig();
    UnsynchronizedBuffer.Reader in = new UnsynchronizedBuffer.Reader(iterators);
    int num = in.readVInt();
    for (int i = 0; i < num; i++) {
      String name = symbolTable.get(in.readVInt());
      String iterClass = symbolTable.get(in.readVInt());
      int prio = in.readVInt();
      config.ssiList.add(new IterInfo(prio, iterClass, name));
      int numOpts = in.readVInt();
      HashMap<String,String> opts = new HashMap<>();
      for (int j = 0; j < numOpts; j++) {
        String key = symbolTable.get(in.readVInt());
        String val = symbolTable.get(in.readVInt());
        opts.put(key, val);
      }
      config.ssio.put(name, opts);
    }
    return config;
  }

  /**
   * Returns the live symbol table backing this instance; callers must not mutate it.
   */
  public List<String> getSymbolTable() {
    return symbolTable;
  }
}
| 9,857 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/DelegationTokenConfigSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
import org.apache.accumulo.core.securityImpl.thrift.TDelegationTokenConfig;
/**
* Handles serialization of {@link DelegationTokenConfig}
*/
/**
 * Handles serialization of {@link DelegationTokenConfig} to and from its Thrift representation.
 */
public class DelegationTokenConfigSerializer {

  private DelegationTokenConfigSerializer() {
    // static utility class; not meant to be instantiated
  }

  /**
   * Serialize the delegation token config into the thrift variant
   *
   * @param config The configuration
   * @return the equivalent Thrift configuration, with the lifetime expressed in milliseconds
   */
  public static TDelegationTokenConfig serialize(DelegationTokenConfig config) {
    TDelegationTokenConfig tconfig = new TDelegationTokenConfig();
    tconfig.setLifetime(config.getTokenLifetime(MILLISECONDS));
    return tconfig;
  }

  /**
   * Deserialize the Thrift delegation token config into the non-thrift variant
   *
   * @param tconfig The thrift configuration
   * @return the equivalent client-side configuration; lifetime is only set if present in tconfig
   */
  public static DelegationTokenConfig deserialize(TDelegationTokenConfig tconfig) {
    DelegationTokenConfig config = new DelegationTokenConfig();
    if (tconfig.isSetLifetime()) {
      config.setTokenLifetime(tconfig.getLifetime(), MILLISECONDS);
    }
    return config;
  }
}
| 9,858 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/UserCompactionUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.PluginConfig;
import org.apache.hadoop.io.Text;
import com.google.common.base.Preconditions;
/**
 * Static helpers for encoding and decoding user compaction configuration (selector and configurer
 * plugin configs, execution hints, and whole {@link CompactionConfig} objects) to byte arrays via
 * {@link DataOutput}/{@link DataInput}. Each encoded form starts with a magic number and a version
 * byte so corrupted or mismatched payloads are rejected early.
 */
public class UserCompactionUtils {

  private UserCompactionUtils() {
    // static utility class; not meant to be instantiated
  }

  private static final int MAGIC = 0x02040810;
  private static final int SELECTOR_MAGIC = 0xae9270bf;
  private static final int CONFIGURER_MAGIC = 0xf93e570a;

  // sentinel configs meaning "none set"; both are an empty class name with no options
  public static final PluginConfig DEFAULT_CONFIGURER = new PluginConfig("", Map.of());
  public static final PluginConfig DEFAULT_SELECTOR = new PluginConfig("", Map.of());

  /**
   * Writes a string map as a size followed by alternating UTF key/value pairs.
   */
  public static void encode(DataOutput dout, Map<String,String> options) {
    try {
      dout.writeInt(options.size());
      for (Entry<String,String> entry : options.entrySet()) {
        dout.writeUTF(entry.getKey());
        dout.writeUTF(entry.getValue());
      }
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Writes a plugin config as magic, version, class name, then its options map.
   */
  public static void encode(DataOutput dout, int magic, int version, String className,
      Map<String,String> options) {
    try {
      dout.writeInt(magic);
      dout.writeByte(version);
      dout.writeUTF(className);
      encode(dout, options);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  public static interface Encoder<T> {
    public void encode(DataOutput dout, T p);
  }

  /**
   * Runs the given encoder against an in-memory stream and returns the resulting bytes.
   */
  public static <T> byte[] encode(T csc, Encoder<T> encoder) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    try {
      encoder.encode(dos, csc);
      dos.close();
      return baos.toByteArray();
    } catch (IOException ioe) {
      throw new UncheckedIOException(ioe);
    }
  }

  public static class PluginConfigData {
    String className;
    Map<String,String> opts;
  }

  /**
   * Reads a string map written by {@link #encode(DataOutput, Map)}.
   */
  public static Map<String,String> decodeMap(DataInput din) {
    try {
      int numEntries = din.readInt();
      var opts = new HashMap<String,String>();
      for (int i = 0; i < numEntries; i++) {
        String k = din.readUTF();
        String v = din.readUTF();
        opts.put(k, v);
      }
      return opts;
    } catch (IOException ioe) {
      throw new UncheckedIOException(ioe);
    }
  }

  /**
   * Reads a plugin config written by {@link #encode(DataOutput, int, int, String, Map)},
   * validating the expected magic number and version.
   *
   * @throws IllegalArgumentException if the magic number or version does not match
   */
  public static PluginConfigData decode(DataInput din, int magic, int version) {
    try {
      if (din.readInt() != magic) {
        throw new IllegalArgumentException("Unexpected MAGIC ");
      }
      if (din.readByte() != version) {
        throw new IllegalArgumentException("Unexpected version");
      }
      var pcd = new PluginConfigData();
      pcd.className = din.readUTF();
      // reuse decodeMap rather than duplicating the key/value reading loop
      pcd.opts = decodeMap(din);
      return pcd;
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  public static interface Decoder<T> {
    T decode(DataInput di);
  }

  /**
   * Runs the given decoder against an in-memory stream over the encoded bytes.
   */
  public static <T> T decode(byte[] encodedCsc, Decoder<T> decoder) {
    ByteArrayInputStream bais = new ByteArrayInputStream(encodedCsc);
    DataInputStream dis = new DataInputStream(bais);
    return decoder.decode(dis);
  }

  public static void encodeSelector(DataOutput dout, PluginConfig csc) {
    encode(dout, SELECTOR_MAGIC, 1, csc.getClassName(), csc.getOptions());
  }

  public static byte[] encodeSelector(PluginConfig csc) {
    return encode(csc, UserCompactionUtils::encodeSelector);
  }

  public static PluginConfig decodeSelector(DataInput di) {
    var pcd = decode(di, SELECTOR_MAGIC, 1);
    return new PluginConfig(pcd.className, pcd.opts);
  }

  public static PluginConfig decodeSelector(byte[] bytes) {
    return decode(bytes, UserCompactionUtils::decodeSelector);
  }

  public static void encodeConfigurer(DataOutput dout, PluginConfig ccc) {
    encode(dout, CONFIGURER_MAGIC, 1, ccc.getClassName(), ccc.getOptions());
  }

  public static byte[] encodeConfigurer(PluginConfig ccc) {
    return encode(ccc, UserCompactionUtils::encodeConfigurer);
  }

  public static PluginConfig decodeConfigurer(DataInput di) {
    var pcd = decode(di, CONFIGURER_MAGIC, 1);
    return new PluginConfig(pcd.className, pcd.opts);
  }

  public static PluginConfig decodeConfigurer(byte[] bytes) {
    return decode(bytes, UserCompactionUtils::decodeConfigurer);
  }

  public static byte[] encode(Map<String,String> options) {
    return encode(options, UserCompactionUtils::encode);
  }

  public static Map<String,String> decodeMap(byte[] bytes) {
    return decode(bytes, UserCompactionUtils::decodeMap);
  }

  /**
   * Writes a full {@link CompactionConfig}: magic, optional start/end rows, iterators, configurer,
   * selector, and execution hints, in that order.
   */
  public static void encode(DataOutput dout, CompactionConfig cc) {
    try {
      dout.writeInt(MAGIC);
      // start and end rows are optional; a boolean flag precedes each
      dout.writeBoolean(cc.getStartRow() != null);
      if (cc.getStartRow() != null) {
        cc.getStartRow().write(dout);
      }
      dout.writeBoolean(cc.getEndRow() != null);
      if (cc.getEndRow() != null) {
        cc.getEndRow().write(dout);
      }
      dout.writeInt(cc.getIterators().size());
      for (IteratorSetting is : cc.getIterators()) {
        is.write(dout);
      }
      encodeConfigurer(dout, cc.getConfigurer());
      encodeSelector(dout, cc.getSelector());
      encode(dout, cc.getExecutionHints());
    } catch (IOException ioe) {
      throw new UncheckedIOException(ioe);
    }
  }

  public static byte[] encode(CompactionConfig cc) {
    return encode(cc, UserCompactionUtils::encode);
  }

  /**
   * Reads a {@link CompactionConfig} written by {@link #encode(DataOutput, CompactionConfig)}.
   * Default (sentinel) configurer/selector values are left unset on the result.
   */
  public static CompactionConfig decodeCompactionConfig(DataInput din) {
    try {
      Preconditions.checkArgument(MAGIC == din.readInt());
      CompactionConfig cc = new CompactionConfig();
      if (din.readBoolean()) {
        Text startRow = new Text();
        startRow.readFields(din);
        cc.setStartRow(startRow);
      }
      if (din.readBoolean()) {
        Text endRow = new Text();
        endRow.readFields(din);
        cc.setEndRow(endRow);
      }
      int num = din.readInt();
      var iterators = new ArrayList<IteratorSetting>(num);
      for (int i = 0; i < num; i++) {
        iterators.add(new IteratorSetting(din));
      }
      cc.setIterators(iterators);
      var configurer = decodeConfigurer(din);
      if (!isDefault(configurer)) {
        cc.setConfigurer(configurer);
      }
      var selector = decodeSelector(din);
      if (!isDefault(selector)) {
        cc.setSelector(selector);
      }
      var hints = decodeMap(din);
      cc.setExecutionHints(hints);
      return cc;
    } catch (IOException ioe) {
      throw new UncheckedIOException(ioe);
    }
  }

  /**
   * Returns true if the given plugin config is the "none set" sentinel. Note that
   * {@link #DEFAULT_CONFIGURER} and {@link #DEFAULT_SELECTOR} are equal, so this check is valid
   * for both configurers and selectors.
   */
  public static boolean isDefault(PluginConfig configurer) {
    return configurer.equals(DEFAULT_CONFIGURER);
  }

  public static CompactionConfig decodeCompactionConfig(byte[] bytes) {
    return decode(bytes, UserCompactionUtils::decodeCompactionConfig);
  }
}
| 9,859 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/NamespaceOperationsHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.admin.NamespaceOperations;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
/**
 * Partial {@link NamespaceOperations} implementation that builds iterator and constraint
 * management on top of the abstract property accessors ({@code getProperties},
 * {@code modifyProperties}, {@code setProperty}, {@code removeProperty}) supplied by subclasses.
 */
public abstract class NamespaceOperationsHelper implements NamespaceOperations {

  @Override
  public String systemNamespace() {
    return Namespace.ACCUMULO.name();
  }

  @Override
  public String defaultNamespace() {
    return Namespace.DEFAULT.name();
  }

  @Override
  public void attachIterator(String namespace, IteratorSetting setting)
      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
    attachIterator(namespace, setting, EnumSet.allOf(IteratorScope.class));
  }

  @Override
  public void attachIterator(String namespace, IteratorSetting setting,
      EnumSet<IteratorScope> scopes)
      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
    checkIteratorConflicts(namespace, setting, scopes);
    for (IteratorScope scope : scopes) {
      // Locale.ROOT: property keys must be locale-independent (e.g. Turkish 'I' lowercasing)
      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
          scope.name().toLowerCase(Locale.ROOT), setting.getName());
      this.modifyProperties(namespace, properties -> {
        for (Entry<String,String> prop : setting.getOptions().entrySet()) {
          properties.put(root + ".opt." + prop.getKey(), prop.getValue());
        }
        properties.put(root, setting.getPriority() + "," + setting.getIteratorClass());
      });
    }
  }

  @Override
  public void removeIterator(String namespace, String name, EnumSet<IteratorScope> scopes)
      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
    if (!exists(namespace)) {
      throw new NamespaceNotFoundException(null, namespace, null);
    }
    // snapshot the config so the removal predicate is stable while properties are modified
    Map<String,String> copy = Map.copyOf(this.getConfiguration(namespace));
    for (IteratorScope scope : scopes) {
      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
          scope.name().toLowerCase(Locale.ROOT), name);
      this.modifyProperties(namespace,
          properties -> copy.keySet().stream()
              .filter(key -> key.equals(root) || key.startsWith(root + ".opt."))
              .forEach(properties::remove));
    }
  }

  /**
   * Reads the named iterator's priority, class, and options from the namespace properties.
   * Returns null when the iterator is not configured for the given scope.
   */
  @Override
  public IteratorSetting getIteratorSetting(String namespace, String name, IteratorScope scope)
      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
    if (!exists(namespace)) {
      throw new NamespaceNotFoundException(null, namespace, null);
    }
    int priority = -1;
    String classname = null;
    Map<String,String> settings = new HashMap<>();
    String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
        scope.name().toLowerCase(Locale.ROOT), name);
    String opt = root + ".opt.";
    for (Entry<String,String> property : this.getProperties(namespace)) {
      if (property.getKey().equals(root)) {
        // the base property value is "<priority>,<class>"
        String[] parts = property.getValue().split(",");
        if (parts.length != 2) {
          throw new AccumuloException("Bad value for iterator setting: " + property.getValue());
        }
        priority = Integer.parseInt(parts[0]);
        classname = parts[1];
      } else if (property.getKey().startsWith(opt)) {
        settings.put(property.getKey().substring(opt.length()), property.getValue());
      }
    }
    if (priority <= 0 || classname == null) {
      return null;
    }
    return new IteratorSetting(priority, name, classname, settings);
  }

  @Override
  public Map<String,EnumSet<IteratorScope>> listIterators(String namespace)
      throws AccumuloSecurityException, AccumuloException, NamespaceNotFoundException {
    if (!exists(namespace)) {
      throw new NamespaceNotFoundException(null, namespace, null);
    }
    Map<String,EnumSet<IteratorScope>> result = new TreeMap<>();
    for (Entry<String,String> property : this.getProperties(namespace)) {
      String name = property.getKey();
      // iterator properties look like "table.iterator.<scope>.<name>"
      String[] parts = name.split("\\.");
      if (parts.length == 4) {
        if (parts[0].equals("table") && parts[1].equals("iterator")) {
          IteratorScope scope = IteratorScope.valueOf(parts[2]);
          result.computeIfAbsent(parts[3], k -> EnumSet.noneOf(IteratorScope.class)).add(scope);
        }
      }
    }
    return result;
  }

  /**
   * Verifies the proposed iterator does not collide with an existing one in any of the given
   * scopes: same name, same priority, or overlapping option keys all cause a failure.
   */
  @Override
  public void checkIteratorConflicts(String namespace, IteratorSetting setting,
      EnumSet<IteratorScope> scopes)
      throws AccumuloException, NamespaceNotFoundException, AccumuloSecurityException {
    if (!exists(namespace)) {
      throw new NamespaceNotFoundException(null, namespace, null);
    }
    for (IteratorScope scope : scopes) {
      String scopeStr = String.format("%s%s", Property.TABLE_ITERATOR_PREFIX,
          scope.name().toLowerCase(Locale.ROOT));
      String nameStr = String.format("%s.%s", scopeStr, setting.getName());
      String optStr = String.format("%s.opt.", nameStr);
      Map<String,String> optionConflicts = new TreeMap<>();
      for (Entry<String,String> property : this.getProperties(namespace)) {
        if (property.getKey().startsWith(scopeStr)) {
          if (property.getKey().equals(nameStr)) {
            throw new AccumuloException(new IllegalArgumentException("iterator name conflict for "
                + setting.getName() + ": " + property.getKey() + "=" + property.getValue()));
          }
          if (property.getKey().startsWith(optStr)) {
            optionConflicts.put(property.getKey(), property.getValue());
          }
          // option properties do not carry a priority; only base entries are parsed below
          if (property.getKey().contains(".opt.")) {
            continue;
          }
          String[] parts = property.getValue().split(",");
          if (parts.length != 2) {
            throw new AccumuloException("Bad value for existing iterator setting: "
                + property.getKey() + "=" + property.getValue());
          }
          try {
            if (Integer.parseInt(parts[0]) == setting.getPriority()) {
              throw new AccumuloException(new IllegalArgumentException(
                  "iterator priority conflict: " + property.getKey() + "=" + property.getValue()));
            }
          } catch (NumberFormatException e) {
            throw new AccumuloException("Bad value for existing iterator setting: "
                + property.getKey() + "=" + property.getValue());
          }
        }
      }
      if (!optionConflicts.isEmpty()) {
        throw new AccumuloException(new IllegalArgumentException(
            "iterator options conflict for " + setting.getName() + ": " + optionConflicts));
      }
    }
  }

  /**
   * Adds a constraint class under the lowest unused constraint number.
   *
   * @return the number assigned to the new constraint
   */
  @Override
  public int addConstraint(String namespace, String constraintClassName)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    TreeSet<Integer> constraintNumbers = new TreeSet<>();
    TreeMap<String,Integer> constraintClasses = new TreeMap<>();
    int i;
    for (Entry<String,String> property : this.getProperties(namespace)) {
      if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
        try {
          i = Integer.parseInt(
              property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length()));
        } catch (NumberFormatException e) {
          throw new AccumuloException("Bad key for existing constraint: " + property);
        }
        constraintNumbers.add(i);
        constraintClasses.put(property.getValue(), i);
      }
    }
    // find the smallest positive number not already in use
    i = 1;
    while (constraintNumbers.contains(i)) {
      i++;
    }
    if (constraintClasses.containsKey(constraintClassName)) {
      throw new AccumuloException(
          "Constraint " + constraintClassName + " already exists for namespace " + namespace
              + " with number " + constraintClasses.get(constraintClassName));
    }
    this.setProperty(namespace, Property.TABLE_CONSTRAINT_PREFIX.toString() + i,
        constraintClassName);
    return i;
  }

  @Override
  public void removeConstraint(String namespace, int number)
      throws AccumuloException, AccumuloSecurityException, NamespaceNotFoundException {
    this.removeProperty(namespace, Property.TABLE_CONSTRAINT_PREFIX.toString() + number);
  }

  @Override
  public Map<String,Integer> listConstraints(String namespace)
      throws AccumuloException, NamespaceNotFoundException, AccumuloSecurityException {
    Map<String,Integer> constraints = new TreeMap<>();
    for (Entry<String,String> property : this.getProperties(namespace)) {
      if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
        if (constraints.containsKey(property.getValue())) {
          throw new AccumuloException("Same constraint configured twice: " + property.getKey() + "="
              + Property.TABLE_CONSTRAINT_PREFIX + constraints.get(property.getValue()) + "="
              + property.getKey());
        }
        try {
          constraints.put(property.getValue(), Integer.parseInt(
              property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length())));
        } catch (NumberFormatException e) {
          throw new AccumuloException("Bad key for existing constraint: " + property);
        }
      }
    }
    return constraints;
  }
}
| 9,860 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientInfo.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.net.URL;
import java.nio.file.Path;
import java.util.Properties;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.hadoop.conf.Configuration;
/**
* Accumulo client information. Can be built using {@link Accumulo#newClient()}
*
* @since 2.0.0
*/
/**
 * Accumulo client information. Can be built using {@link Accumulo#newClient()}
 *
 * <p>
 * Implementations expose the connection settings (instance, ZooKeeper, credentials) resolved from
 * client properties. The static {@code from} factories construct instances from properties held in
 * memory or loaded from a file/URL.
 *
 * @since 2.0.0
 */
public interface ClientInfo {
  /**
   * @return Accumulo instance name
   */
  String getInstanceName();
  /**
   * @return Zookeeper connection information for Accumulo instance
   */
  String getZooKeepers();
  /**
   * @return ZooKeeper connection timeout
   */
  int getZooKeepersSessionTimeOut();
  /**
   * @return Accumulo principal/username
   */
  String getPrincipal();
  /**
   * @return {@link AuthenticationToken} used for this connection
   */
  AuthenticationToken getAuthenticationToken();
  /**
   * @return True if SASL enabled
   */
  boolean saslEnabled();
  /**
   * @return All Accumulo client properties set for this connection
   */
  Properties getProperties();
  /**
   * @return hadoop Configuration
   */
  Configuration getHadoopConf();
  /**
   * @param properties client properties to build the info from
   * @return ClientInfo given properties
   */
  static ClientInfo from(Properties properties) {
    return new ClientInfoImpl(properties);
  }
  /**
   * @param propertiesURL URL of a client configuration file to load
   * @return ClientInfo given URL path to client config file
   */
  static ClientInfo from(URL propertiesURL) {
    return new ClientInfoImpl(propertiesURL);
  }
  /**
   * @param properties client properties to build the info from
   * @param token authentication token to use, overriding any token in the properties
   * @return ClientInfo given properties and token
   */
  static ClientInfo from(Properties properties, AuthenticationToken token) {
    return new ClientInfoImpl(properties, token);
  }
  /**
   * @param propertiesFile path of a client configuration file to load
   * @return ClientInfo given path to client config file
   */
  static ClientInfo from(Path propertiesFile) {
    return new ClientInfoImpl(propertiesFile);
  }
}
| 9,861 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/SecurityOperationsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.accumulo.core.client.security.SecurityErrorCode.NAMESPACE_DOESNT_EXIST;
import static org.apache.accumulo.core.util.Validators.EXISTING_NAMESPACE_NAME;
import java.nio.ByteBuffer;
import java.util.Set;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
import org.apache.accumulo.core.client.admin.SecurityOperations;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.DelegationToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.clientImpl.thrift.ClientService;
import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes.Exec;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes.ExecVoid;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.NamespacePermission;
import org.apache.accumulo.core.security.SystemPermission;
import org.apache.accumulo.core.security.TablePermission;
import org.apache.accumulo.core.securityImpl.thrift.TDelegationToken;
import org.apache.accumulo.core.securityImpl.thrift.TDelegationTokenConfig;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.ByteBufferUtil;
/**
 * Client-side implementation of {@link SecurityOperations}. Each method validates its
 * arguments and delegates to the appropriate thrift RPC via
 * {@link ThriftClientTypes#CLIENT}, translating thrift-level table/namespace-not-found
 * errors into the {@link AccumuloSecurityException} codes callers of this API expect.
 */
public class SecurityOperationsImpl implements SecurityOperations {

  private final ClientContext context;

  /**
   * If the given exception was caused by a {@link ThriftTableOperationException} indicating a
   * missing table or namespace, recast it as the corresponding
   * {@link AccumuloSecurityException}; otherwise return normally so the caller can rethrow the
   * original. Shared by {@code execute} and {@code executeVoid} so their error handling cannot
   * drift apart.
   *
   * @param e exception whose cause is inspected
   * @throws AccumuloSecurityException when the cause indicates a missing table or namespace
   */
  private static void throwOnMissingTableOrNamespace(Exception e)
      throws AccumuloSecurityException {
    Throwable t = e.getCause();
    if (t instanceof ThriftTableOperationException) {
      ThriftTableOperationException ttoe = (ThriftTableOperationException) t;
      // recast missing table/namespace into the security error code callers expect
      if (ttoe.getType() == TableOperationExceptionType.NOTFOUND) {
        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST);
      } else if (ttoe.getType() == TableOperationExceptionType.NAMESPACE_NOTFOUND) {
        throw new AccumuloSecurityException(null, SecurityErrorCode.NAMESPACE_DOESNT_EXIST);
      }
    }
  }

  /**
   * Execute a method on the client API that does not return a value
   *
   * @param exec client operation to execute
   * @throws AccumuloException error executing client operation
   * @throws AccumuloSecurityException error executing client operation
   */
  private void executeVoid(ExecVoid<ClientService.Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    try {
      ThriftClientTypes.CLIENT.executeVoid(context, exec::execute);
    } catch (AccumuloSecurityException | AccumuloException e) {
      throwOnMissingTableOrNamespace(e);
      // precise rethrow: e is still typed as the caught alternatives (Java 7+)
      throw e;
    } catch (Exception e) {
      throw new AccumuloException(e);
    }
  }

  /**
   * Execute a method on the client API that returns an instance of type R
   *
   * @param <R> return type
   * @param exec client operation to execute
   * @return instance of type R
   * @throws AccumuloException error executing client operation
   * @throws AccumuloSecurityException error executing client operation
   */
  private <R> R execute(Exec<R,ClientService.Client> exec)
      throws AccumuloException, AccumuloSecurityException {
    try {
      return ThriftClientTypes.CLIENT.execute(context, exec::execute);
    } catch (AccumuloSecurityException | AccumuloException e) {
      throwOnMissingTableOrNamespace(e);
      // precise rethrow: e is still typed as the caught alternatives (Java 7+)
      throw e;
    } catch (Exception e) {
      throw new AccumuloException(e);
    }
  }

  public SecurityOperationsImpl(ClientContext context) {
    checkArgument(context != null, "context is null");
    this.context = context;
  }

  @Override
  public void createLocalUser(final String principal, final PasswordToken password)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    // with SASL, authentication is external (e.g. Kerberos); no password is required
    if (context.getSaslParams() == null) {
      checkArgument(password != null, "password is null");
    }
    executeVoid(client -> {
      if (context.getSaslParams() == null) {
        client.createLocalUser(TraceUtil.traceInfo(), context.rpcCreds(), principal,
            ByteBuffer.wrap(password.getPassword()));
      } else {
        // SASL: send an empty password; the server will not use it
        client.createLocalUser(TraceUtil.traceInfo(), context.rpcCreds(), principal,
            ByteBuffer.wrap(new byte[0]));
      }
    });
  }

  @Override
  public void dropLocalUser(final String principal)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    executeVoid(
        client -> client.dropLocalUser(TraceUtil.traceInfo(), context.rpcCreds(), principal));
  }

  @Override
  public boolean authenticateUser(final String principal, final AuthenticationToken token)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(token != null, "token is null");
    final Credentials toAuth = new Credentials(principal, token);
    return execute(client -> client.authenticateUser(TraceUtil.traceInfo(), context.rpcCreds(),
        toAuth.toThrift(context.getInstanceID())));
  }

  @Override
  public void changeLocalUserPassword(final String principal, final PasswordToken token)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(token != null, "token is null");
    final Credentials toChange = new Credentials(principal, token);
    executeVoid(client -> client.changeLocalUserPassword(TraceUtil.traceInfo(), context.rpcCreds(),
        principal, ByteBuffer.wrap(token.getPassword())));
    // if the caller changed their own password, update the cached credentials so later
    // RPCs from this context authenticate with the new token
    if (context.getCredentials().getPrincipal().equals(principal)) {
      context.setCredentials(toChange);
    }
  }

  @Override
  public void changeUserAuthorizations(final String principal, final Authorizations authorizations)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(authorizations != null, "authorizations is null");
    executeVoid(client -> client.changeAuthorizations(TraceUtil.traceInfo(), context.rpcCreds(),
        principal, ByteBufferUtil.toByteBuffers(authorizations.getAuthorizations())));
  }

  @Override
  public Authorizations getUserAuthorizations(final String principal)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    return execute(client -> new Authorizations(
        client.getUserAuthorizations(TraceUtil.traceInfo(), context.rpcCreds(), principal)));
  }

  @Override
  public boolean hasSystemPermission(final String principal, final SystemPermission perm)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(perm != null, "perm is null");
    return execute(client -> client.hasSystemPermission(TraceUtil.traceInfo(), context.rpcCreds(),
        principal, perm.getId()));
  }

  @Override
  public boolean hasTablePermission(final String principal, final String table,
      final TablePermission perm) throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(table != null, "table is null");
    checkArgument(perm != null, "perm is null");
    try {
      return execute(client -> client.hasTablePermission(TraceUtil.traceInfo(), context.rpcCreds(),
          principal, table, perm.getId()));
    } catch (AccumuloSecurityException e) {
      // a missing namespace here means the table name itself could not be resolved, so
      // report it as a missing table to the caller
      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST) {
        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
      } else {
        throw e;
      }
    }
  }

  @Override
  public boolean hasNamespacePermission(final String principal, final String namespace,
      final NamespacePermission permission) throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(permission != null, "permission is null");
    return execute(client -> client.hasNamespacePermission(TraceUtil.traceInfo(),
        context.rpcCreds(), principal, namespace, permission.getId()));
  }

  @Override
  public void grantSystemPermission(final String principal, final SystemPermission permission)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(permission != null, "permission is null");
    executeVoid(client -> client.grantSystemPermission(TraceUtil.traceInfo(), context.rpcCreds(),
        principal, permission.getId()));
  }

  @Override
  public void grantTablePermission(final String principal, final String table,
      final TablePermission permission) throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(table != null, "table is null");
    checkArgument(permission != null, "permission is null");
    try {
      executeVoid(client -> client.grantTablePermission(TraceUtil.traceInfo(), context.rpcCreds(),
          principal, table, permission.getId()));
    } catch (AccumuloSecurityException e) {
      // see hasTablePermission: recast missing namespace as missing table
      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST) {
        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
      } else {
        throw e;
      }
    }
  }

  @Override
  public void grantNamespacePermission(final String principal, final String namespace,
      final NamespacePermission permission) throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(permission != null, "permission is null");
    executeVoid(client -> client.grantNamespacePermission(TraceUtil.traceInfo(), context.rpcCreds(),
        principal, namespace, permission.getId()));
  }

  @Override
  public void revokeSystemPermission(final String principal, final SystemPermission permission)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(permission != null, "permission is null");
    executeVoid(client -> client.revokeSystemPermission(TraceUtil.traceInfo(), context.rpcCreds(),
        principal, permission.getId()));
  }

  @Override
  public void revokeTablePermission(final String principal, final String table,
      final TablePermission permission) throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    checkArgument(table != null, "table is null");
    checkArgument(permission != null, "permission is null");
    try {
      executeVoid(client -> client.revokeTablePermission(TraceUtil.traceInfo(), context.rpcCreds(),
          principal, table, permission.getId()));
    } catch (AccumuloSecurityException e) {
      // see hasTablePermission: recast missing namespace as missing table
      if (e.getSecurityErrorCode() == NAMESPACE_DOESNT_EXIST) {
        throw new AccumuloSecurityException(null, SecurityErrorCode.TABLE_DOESNT_EXIST, e);
      } else {
        throw e;
      }
    }
  }

  @Override
  public void revokeNamespacePermission(final String principal, final String namespace,
      final NamespacePermission permission) throws AccumuloException, AccumuloSecurityException {
    checkArgument(principal != null, "principal is null");
    EXISTING_NAMESPACE_NAME.validate(namespace);
    checkArgument(permission != null, "permission is null");
    executeVoid(client -> client.revokeNamespacePermission(TraceUtil.traceInfo(),
        context.rpcCreds(), principal, namespace, permission.getId()));
  }

  @Override
  public Set<String> listLocalUsers() throws AccumuloException, AccumuloSecurityException {
    return execute(client -> client.listLocalUsers(TraceUtil.traceInfo(), context.rpcCreds()));
  }

  @Override
  public DelegationToken getDelegationToken(DelegationTokenConfig cfg)
      throws AccumuloException, AccumuloSecurityException {
    final TDelegationTokenConfig tConfig;
    if (cfg != null) {
      tConfig = DelegationTokenConfigSerializer.serialize(cfg);
    } else {
      tConfig = new TDelegationTokenConfig();
    }
    TDelegationToken thriftToken;
    try {
      thriftToken = ThriftClientTypes.MANAGER.executeTableCommand(context,
          client -> client.getDelegationToken(TraceUtil.traceInfo(), context.rpcCreds(), tConfig));
    } catch (TableNotFoundException e) {
      // should never happen
      throw new AssertionError(
          "Received TableNotFoundException on method which should not throw that exception", e);
    }
    AuthenticationTokenIdentifier identifier =
        new AuthenticationTokenIdentifier(thriftToken.getIdentifier());
    // Get the password out of the thrift delegation token
    return new DelegationTokenImpl(thriftToken.getPassword(), identifier);
  }
}
| 9,862 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/MultiTableBatchWriterImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import java.lang.ref.Cleaner.Cleanable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.util.cleaner.CleanerUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A {@link MultiTableBatchWriter} that multiplexes mutations for many tables through a single
 * shared {@link TabletServerBatchWriter}. Per-table {@link BatchWriter} views are cached and
 * reused; flush/close only make sense on the whole multiplexer, never per table.
 */
public class MultiTableBatchWriterImpl implements MultiTableBatchWriter {
  private static final Logger log = LoggerFactory.getLogger(MultiTableBatchWriterImpl.class);

  /**
   * Lightweight per-table view over the shared writer. Mutations are tagged with this view's
   * table id and handed to the shared {@link TabletServerBatchWriter}.
   */
  private class TableBatchWriter implements BatchWriter {
    private final TableId tableId;

    private TableBatchWriter(TableId tableId) {
      this.tableId = tableId;
    }

    @Override
    public void addMutation(Mutation m) throws MutationsRejectedException {
      checkArgument(m != null, "m is null");
      bw.addMutation(tableId, m);
    }

    @Override
    public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException {
      bw.addMutation(tableId, iterable.iterator());
    }

    @Override
    public void close() {
      // buffering is shared across tables; closing one table's view is meaningless
      throw new UnsupportedOperationException(
          "Must close all tables, can not close an individual table");
    }

    @Override
    public void flush() {
      // buffering is shared across tables; flushing one table's view is meaningless
      throw new UnsupportedOperationException(
          "Must flush all tables, can not flush an individual table");
    }
  }

  // cache of per-table views, keyed by table id so renames do not create duplicates
  private final ConcurrentHashMap<TableId,BatchWriter> tableWriters = new ConcurrentHashMap<>();
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final ClientContext context;
  private final TabletServerBatchWriter bw;
  private final Cleanable cleanable;

  MultiTableBatchWriterImpl(ClientContext context, BatchWriterConfig config) {
    checkArgument(context != null, "context is null");
    checkArgument(config != null, "config is null");
    this.context = context;
    this.bw = new TabletServerBatchWriter(context, config);
    // safety net: warn and close the underlying writer if the caller forgets to close
    // this object; the cleaner consults 'closed' first so a proper close() disarms it
    this.cleanable = CleanerUtil.unclosed(this, MultiTableBatchWriter.class, closed, log, bw);
  }

  @Override
  public boolean isClosed() {
    return closed.get();
  }

  @Override
  public void close() throws MutationsRejectedException {
    if (closed.compareAndSet(false, true)) {
      // deregister cleanable, but it won't run because it checks
      // the value of closed first, which is now true
      cleanable.clean();
      bw.close();
    }
  }

  @Override
  public BatchWriter getBatchWriter(String tableName) throws TableNotFoundException {
    checkArgument(tableName != null, "tableName is null");
    TableId tableId = context.getTableId(tableName);
    // atomic get-or-create: replaces the previous get/putIfAbsent race, which could
    // construct a TableBatchWriter only to discard it when another thread won
    return tableWriters.computeIfAbsent(tableId, id -> new TableBatchWriter(id));
  }

  @Override
  public void flush() throws MutationsRejectedException {
    bw.flush();
  }
}
| 9,863 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/SyncingTabletLocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;
/**
 * Syncs itself with the static collection of TabletLocators, so that when the server clears it, it
 * will automatically get the most up-to-date version. Caching TabletLocators locally is safe when
 * using SyncingTabletLocator.
 */
public class SyncingTabletLocator extends TabletLocator {
  // volatile so every delegating call observes the most recently swapped-in delegate
  private volatile TabletLocator locator;
  // re-invoked to fetch a fresh delegate whenever the current one becomes invalid
  private final Supplier<TabletLocator> getLocatorFunction;

  public SyncingTabletLocator(Supplier<TabletLocator> getLocatorFunction) {
    this.getLocatorFunction = getLocatorFunction;
    this.locator = getLocatorFunction.get();
  }

  public SyncingTabletLocator(final ClientContext context, final TableId tableId) {
    this(() -> TabletLocator.getLocator(context, tableId));
  }

  /**
   * Returns the current delegate, replacing it first if it has been invalidated.
   *
   * Uses the double-checked pattern: the unsynchronized isValid() read is cheap, and the
   * synchronized block re-checks {@code locator == loc} so only one thread performs the
   * replacement when many notice the invalidation at once.
   */
  private TabletLocator syncLocator() {
    TabletLocator loc = this.locator;
    if (!loc.isValid()) {
      synchronized (this) {
        // only swap if no other thread already replaced the stale delegate
        if (locator == loc) {
          loc = locator = getLocatorFunction.get();
        }
      }
    }
    return loc;
  }

  @Override
  public TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow,
      boolean retry) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    return syncLocator().locateTablet(context, row, skipRow, retry);
  }

  @Override
  public <T extends Mutation> void binMutations(ClientContext context, List<T> mutations,
      Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    syncLocator().binMutations(context, mutations, binnedMutations, failures);
  }

  @Override
  public List<Range> binRanges(ClientContext context, List<Range> ranges,
      Map<String,Map<KeyExtent,List<Range>>> binnedRanges)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    return syncLocator().binRanges(context, ranges, binnedRanges);
  }

  @Override
  public void invalidateCache(KeyExtent failedExtent) {
    syncLocator().invalidateCache(failedExtent);
  }

  @Override
  public void invalidateCache(Collection<KeyExtent> keySet) {
    syncLocator().invalidateCache(keySet);
  }

  @Override
  public void invalidateCache() {
    syncLocator().invalidateCache();
  }

  @Override
  public void invalidateCache(ClientContext context, String server) {
    syncLocator().invalidateCache(context, server);
  }
}
| 9,864 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ActiveScanImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.ActiveScan;
import org.apache.accumulo.core.client.admin.ScanState;
import org.apache.accumulo.core.client.admin.ScanType;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.dataImpl.thrift.TColumn;
import org.apache.accumulo.core.security.Authorizations;
/**
 * A class that contains information about an ActiveScan
 *
 * <p>
 * Immutable snapshot built from the thrift {@code ActiveScan} returned by a tablet server; all
 * fields are populated once in the constructor and never change.
 *
 * @since 1.6.0
 */
public class ActiveScanImpl extends ActiveScan {

  // all state is assigned exactly once from the thrift ActiveScan, so every field is final
  private final long scanId;
  private final String client;
  private final String tableName;
  private final long age;
  private final long idle;
  private final ScanType type;
  private final ScanState state;
  private final KeyExtent extent;
  private final List<Column> columns;
  private final List<String> ssiList;
  private final Map<String,Map<String,String>> ssio;
  private final String user;
  private final Authorizations authorizations;

  ActiveScanImpl(ClientContext context,
      org.apache.accumulo.core.tabletscan.thrift.ActiveScan activeScan)
      throws TableNotFoundException {
    this.scanId = activeScan.scanId;
    this.client = activeScan.client;
    this.user = activeScan.user;
    this.age = activeScan.age;
    this.idle = activeScan.idleTime;
    // resolve the thrift table id back to a table name via the client context
    this.tableName = context.getTableName(TableId.of(activeScan.tableId));
    this.type = ScanType.valueOf(activeScan.getType().name());
    this.state = ScanState.valueOf(activeScan.state.name());
    this.extent = KeyExtent.fromThrift(activeScan.extent);
    this.authorizations = new Authorizations(activeScan.authorizations);
    this.columns = new ArrayList<>(activeScan.columns.size());
    for (TColumn tcolumn : activeScan.columns) {
      this.columns.add(new Column(tcolumn));
    }
    // render each server-side iterator as "name=priority,class"
    this.ssiList = new ArrayList<>();
    for (IterInfo ii : activeScan.ssiList) {
      this.ssiList.add(ii.iterName + "=" + ii.priority + "," + ii.className);
    }
    this.ssio = activeScan.ssio;
  }

  @Override
  public long getScanid() {
    return scanId;
  }

  @Override
  public String getClient() {
    return client;
  }

  @Override
  public String getUser() {
    return user;
  }

  @Override
  public String getTable() {
    return tableName;
  }

  @Override
  public long getAge() {
    return age;
  }

  @Override
  public long getLastContactTime() {
    return idle;
  }

  @Override
  public ScanType getType() {
    return type;
  }

  @Override
  public ScanState getState() {
    return state;
  }

  @Override
  public TabletId getTablet() {
    return new TabletIdImpl(extent);
  }

  @Override
  public List<Column> getColumns() {
    return columns;
  }

  @Override
  public List<String> getSsiList() {
    return ssiList;
  }

  @Override
  public Map<String,Map<String,String>> getSsio() {
    return ssio;
  }

  @Override
  public Authorizations getAuthorizations() {
    return authorizations;
  }

  @Override
  public long getIdleTime() {
    return idle;
  }
}
| 9,865 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TableOperationsHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
public abstract class TableOperationsHelper implements TableOperations {
@Override
public void attachIterator(String tableName, IteratorSetting setting)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  // convenience overload: attach the iterator at every scope
  attachIterator(tableName, setting, EnumSet.allOf(IteratorScope.class));
}
@Override
public void attachIterator(String tableName, IteratorSetting setting,
    EnumSet<IteratorScope> scopes)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  checkArgument(setting != null, "setting is null");
  checkArgument(scopes != null, "scopes is null");
  // fail fast before writing anything if the name, priority, or options clash with an
  // iterator already configured in any of the requested scopes
  checkIteratorConflicts(tableName, setting, scopes);
  for (IteratorScope scope : scopes) {
    // property root, e.g. "table.iterator.scan.<iteratorName>"
    String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
        scope.name().toLowerCase(), setting.getName());
    // one property mutation per scope: each option under "<root>.opt.", plus the main
    // entry "<root>=<priority>,<class>"
    this.modifyProperties(tableName, properties -> {
      for (Entry<String,String> prop : setting.getOptions().entrySet()) {
        properties.put(root + ".opt." + prop.getKey(), prop.getValue());
      }
      properties.put(root, setting.getPriority() + "," + setting.getIteratorClass());
    });
  }
}
@Override
public void removeIterator(String tableName, String name, EnumSet<IteratorScope> scopes)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  // one-time snapshot of the table config; used to decide which keys to remove
  Map<String,String> copy = Map.copyOf(this.getConfiguration(tableName));
  for (IteratorScope scope : scopes) {
    // property root, e.g. "table.iterator.scan.<name>"
    String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
        scope.name().toLowerCase(), name);
    // remove the iterator entry itself plus all of its ".opt." option keys for this scope
    this.modifyProperties(tableName,
        properties -> copy.keySet().stream()
            .filter(prop -> prop.equals(root) || prop.startsWith(root + ".opt."))
            .forEach(properties::remove));
  }
}
/**
 * Reads the table's properties and reconstructs the named iterator's configuration for the
 * given scope, or returns null when no such iterator entry exists.
 */
@Override
public IteratorSetting getIteratorSetting(String tableName, String name, IteratorScope scope)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  checkArgument(name != null, "name is null");
  checkArgument(scope != null, "scope is null");
  // the iterator's main key, e.g. "table.iterator.scan.<name>", and its option prefix
  String root =
      String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase(), name);
  String optPrefix = root + ".opt.";
  int priority = -1;
  String classname = null;
  Map<String,String> options = new HashMap<>();
  for (Entry<String,String> entry : this.getProperties(tableName)) {
    String key = entry.getKey();
    String value = entry.getValue();
    if (key.equals(root)) {
      // main entry is "<priority>,<class>"
      String[] parts = value.split(",");
      if (parts.length != 2) {
        throw new AccumuloException("Bad value for iterator setting: " + value);
      }
      priority = Integer.parseInt(parts[0]);
      classname = parts[1];
    } else if (key.startsWith(optPrefix)) {
      options.put(key.substring(optPrefix.length()), value);
    }
  }
  // no (valid) main entry found means the iterator is not configured for this scope
  if (priority <= 0 || classname == null) {
    return null;
  }
  return new IteratorSetting(priority, name, classname, options);
}
/**
 * Lists all iterators configured on the table, mapping each iterator name to the set of
 * scopes (scan/minc/majc) it is configured in.
 */
@Override
public Map<String,EnumSet<IteratorScope>> listIterators(String tableName)
    throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  Map<String,EnumSet<IteratorScope>> result = new TreeMap<>();
  for (Entry<String,String> property : this.getProperties(tableName)) {
    // iterator main entries have keys of the form "table.iterator.<scope>.<name>";
    // option keys have more than 4 dot-separated parts and are skipped by the length check
    String[] parts = property.getKey().split("\\.");
    if (parts.length == 4 && parts[0].equals("table") && parts[1].equals("iterator")) {
      IteratorScope scope = IteratorScope.valueOf(parts[2]);
      // computeIfAbsent replaces the previous containsKey/put pair
      result.computeIfAbsent(parts[3], k -> EnumSet.noneOf(IteratorScope.class)).add(scope);
    }
  }
  return result;
}
/**
 * Checks the given table properties for conflicts with the proposed iterator setting in each
 * requested scope. A conflict is: an existing iterator with the same name, an existing
 * iterator with the same priority, or existing option keys under this iterator's name.
 *
 * @param props snapshot of the table's configuration
 * @param setting proposed iterator setting
 * @param scopes scopes to check
 * @throws AccumuloException wrapping an IllegalArgumentException describing the conflict, or
 *         directly when an existing property value is malformed
 */
public static void checkIteratorConflicts(Map<String,String> props, IteratorSetting setting,
    EnumSet<IteratorScope> scopes) throws AccumuloException {
  checkArgument(setting != null, "setting is null");
  checkArgument(scopes != null, "scopes is null");
  for (IteratorScope scope : scopes) {
    // e.g. scopeStr="table.iterator.scan", nameStr="table.iterator.scan.<name>",
    // optStr="table.iterator.scan.<name>.opt."
    String scopeStr =
        String.format("%s%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase());
    String nameStr = String.format("%s.%s", scopeStr, setting.getName());
    String optStr = String.format("%s.opt.", nameStr);
    Map<String,String> optionConflicts = new TreeMap<>();
    for (Entry<String,String> property : props.entrySet()) {
      if (property.getKey().startsWith(scopeStr)) {
        // same iterator name already configured in this scope
        if (property.getKey().equals(nameStr)) {
          throw new AccumuloException(new IllegalArgumentException("iterator name conflict for "
              + setting.getName() + ": " + property.getKey() + "=" + property.getValue()));
        }
        // collect (rather than immediately throw on) option-key conflicts so the final
        // error can report all of them at once
        if (property.getKey().startsWith(optStr)) {
          optionConflicts.put(property.getKey(), property.getValue());
        }
        // option entries are not "<priority>,<class>" values, so skip the priority parse
        if (property.getKey().contains(".opt.")) {
          continue;
        }
        String[] parts = property.getValue().split(",");
        if (parts.length != 2) {
          throw new AccumuloException("Bad value for existing iterator setting: "
              + property.getKey() + "=" + property.getValue());
        }
        try {
          // another iterator in this scope already uses the requested priority
          if (Integer.parseInt(parts[0]) == setting.getPriority()) {
            throw new AccumuloException(new IllegalArgumentException(
                "iterator priority conflict: " + property.getKey() + "=" + property.getValue()));
          }
        } catch (NumberFormatException e) {
          throw new AccumuloException("Bad value for existing iterator setting: "
              + property.getKey() + "=" + property.getValue());
        }
      }
    }
    if (!optionConflicts.isEmpty()) {
      throw new AccumuloException(new IllegalArgumentException(
          "iterator options conflict for " + setting.getName() + ": " + optionConflicts));
    }
  }
}
/**
 * Validates the table name, then runs the static conflict check against an immutable
 * snapshot of the table's current configuration.
 */
@Override
public void checkIteratorConflicts(String tableName, IteratorSetting setting,
    EnumSet<IteratorScope> scopes) throws AccumuloException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  checkIteratorConflicts(Map.copyOf(this.getConfiguration(tableName)), setting, scopes);
}
@Override
public int addConstraint(String tableName, String constraintClassName)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  EXISTING_TABLE_NAME.validate(tableName);
  TreeSet<Integer> constraintNumbers = new TreeSet<>();
  TreeMap<String,Integer> constraintClasses = new TreeMap<>();
  int i;
  // gather the numbers and classes of all constraints currently set on the table;
  // constraint keys look like "table.constraint.<number>" with the class as the value
  for (Entry<String,String> property : this.getProperties(tableName)) {
    if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
      try {
        i = Integer.parseInt(
            property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length()));
      } catch (NumberFormatException e) {
        throw new AccumuloException("Bad key for existing constraint: " + property);
      }
      constraintNumbers.add(i);
      constraintClasses.put(property.getValue(), i);
    }
  }
  // allocate the smallest unused constraint number, starting from 1
  i = 1;
  while (constraintNumbers.contains(i)) {
    i++;
  }
  // refuse to register the same constraint class twice on one table
  if (constraintClasses.containsKey(constraintClassName)) {
    throw new AccumuloException("Constraint " + constraintClassName + " already exists for table "
        + tableName + " with number " + constraintClasses.get(constraintClassName));
  }
  this.setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX.toString() + i,
      constraintClassName);
  return i;
}
@Override
public void removeConstraint(String tableName, int number)
throws AccumuloException, AccumuloSecurityException {
this.removeProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX.toString() + number);
}
@Override
public Map<String,Integer> listConstraints(String tableName)
throws AccumuloException, TableNotFoundException {
EXISTING_TABLE_NAME.validate(tableName);
Map<String,Integer> constraints = new TreeMap<>();
for (Entry<String,String> property : this.getProperties(tableName)) {
if (property.getKey().startsWith(Property.TABLE_CONSTRAINT_PREFIX.toString())) {
if (constraints.containsKey(property.getValue())) {
throw new AccumuloException("Same constraint configured twice: " + property.getKey() + "="
+ Property.TABLE_CONSTRAINT_PREFIX + constraints.get(property.getValue()) + "="
+ property.getKey());
}
try {
constraints.put(property.getValue(), Integer.parseInt(
property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length())));
} catch (NumberFormatException e) {
throw new AccumuloException("Bad key for existing constraint: " + property);
}
}
}
return constraints;
}
}
| 9,866 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataLocationObtainer;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.singletons.SingletonManager;
import org.apache.accumulo.core.singletons.SingletonService;
import org.apache.accumulo.core.util.Interner;
import org.apache.hadoop.io.Text;
import com.google.common.base.Preconditions;
/**
 * Client-side service that maps rows of a table to the tablets (and the tablet servers hosting
 * them) that contain those rows. One locator exists per (instance id, table id) pair and is shared
 * JVM-wide via {@link #getLocator}.
 */
public abstract class TabletLocator {

  /**
   * Flipped false on call to {@link #clearLocators}. Checked by client classes that locally cache
   * Locators.
   */
  private volatile boolean isValid = true;

  // Callers caching a reference to this locator use this to detect that the JVM-wide cache was
  // cleared and they must re-fetch via getLocator().
  boolean isValid() {
    return isValid;
  }

  /**
   * Locates the tablet containing the given row.
   *
   * @param skipRow when true, locate the tablet containing the row immediately after {@code row}
   * @param retry when true, implementations may keep retrying instead of returning null
   * @return the tablet location, or null if it could not be determined
   */
  public abstract TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow,
      boolean retry) throws AccumuloException, AccumuloSecurityException, TableNotFoundException;

  /**
   * Groups mutations by the tablet server hosting their destination tablet; mutations whose
   * tablet location is unknown are added to {@code failures}.
   */
  public abstract <T extends Mutation> void binMutations(ClientContext context, List<T> mutations,
      Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException;

  /**
   * Groups ranges by tablet server and tablet.
   *
   * @return the ranges that could not be binned
   */
  public abstract List<Range> binRanges(ClientContext context, List<Range> ranges,
      Map<String,Map<KeyExtent,List<Range>>> binnedRanges)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException;

  /** Invalidate any cached location for the given extent. */
  public abstract void invalidateCache(KeyExtent failedExtent);

  /** Invalidate any cached locations for the given extents. */
  public abstract void invalidateCache(Collection<KeyExtent> keySet);

  /**
   * Invalidate entire cache
   */
  public abstract void invalidateCache();

  /**
   * Invalidate all metadata entries that point to server
   */
  public abstract void invalidateCache(ClientContext context, String server);

  // Key for the JVM-wide locator map: a locator is unique per Accumulo instance and table.
  private static class LocatorKey {
    InstanceId instanceId;
    TableId tableId;

    LocatorKey(InstanceId instanceId, TableId table) {
      this.instanceId = instanceId;
      this.tableId = table;
    }

    @Override
    public int hashCode() {
      return instanceId.hashCode() + tableId.hashCode();
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof LocatorKey) {
        return equals((LocatorKey) o);
      }
      return false;
    }

    public boolean equals(LocatorKey lk) {
      return instanceId.equals(lk.instanceId) && tableId.equals(lk.tableId);
    }
  }

  // JVM-wide locator cache and its enabled flag; both guarded by synchronizing on this class.
  private static final HashMap<LocatorKey,TabletLocator> locators = new HashMap<>();
  private static boolean enabled = true;

  public static synchronized void clearLocators() {
    // Mark each locator invalid before discarding so callers holding a stale reference can
    // observe isValid() == false.
    for (TabletLocator locator : locators.values()) {
      locator.isValid = false;
    }
    locators.clear();
  }

  static synchronized boolean isEnabled() {
    return enabled;
  }

  static synchronized void disable() {
    clearLocators();
    enabled = false;
  }

  static synchronized void enable() {
    enabled = true;
  }

  /**
   * Gets (creating and caching on first use) the locator for a table. User-table locators chain
   * to the metadata table locator, which chains to the root table locator.
   *
   * @throws IllegalStateException if the locator singleton service has been disabled
   */
  public static synchronized TabletLocator getLocator(ClientContext context, TableId tableId) {
    Preconditions.checkState(enabled, "The Accumulo singleton that that tracks tablet locations is "
        + "disabled. This is likely caused by all AccumuloClients being closed or garbage collected");
    LocatorKey key = new LocatorKey(context.getInstanceID(), tableId);
    TabletLocator tl = locators.get(key);
    if (tl == null) {
      MetadataLocationObtainer mlo = new MetadataLocationObtainer();

      if (RootTable.ID.equals(tableId)) {
        tl = new RootTabletLocator(new ZookeeperLockChecker(context));
      } else if (MetadataTable.ID.equals(tableId)) {
        tl = new TabletLocatorImpl(MetadataTable.ID, getLocator(context, RootTable.ID), mlo,
            new ZookeeperLockChecker(context));
      } else {
        tl = new TabletLocatorImpl(tableId, getLocator(context, MetadataTable.ID), mlo,
            new ZookeeperLockChecker(context));
      }
      locators.put(key, tl);
    }

    return tl;
  }

  static {
    // Register with the singleton manager so the locator cache is torn down when clients go away
    // and re-enabled when a new client is created.
    SingletonManager.register(new SingletonService() {

      @Override
      public boolean isEnabled() {
        return TabletLocator.isEnabled();
      }

      @Override
      public void enable() {
        TabletLocator.enable();
      }

      @Override
      public void disable() {
        TabletLocator.disable();
      }
    });
  }

  /** Result of a metadata lookup: tablets with a known location plus tablets without one. */
  public static class TabletLocations {

    private final List<TabletLocation> locations;
    private final List<KeyExtent> locationless;

    public TabletLocations(List<TabletLocation> locations, List<KeyExtent> locationless) {
      this.locations = locations;
      this.locationless = locationless;
    }

    public List<TabletLocation> getLocations() {
      return locations;
    }

    public List<KeyExtent> getLocationless() {
      return locationless;
    }
  }

  /** An extent paired with the tablet server (and server session) currently hosting it. */
  public static class TabletLocation {
    // Many cached locations repeat the same server/session strings; interning them saves memory.
    private static final Interner<String> interner = new Interner<>();

    private final KeyExtent tablet_extent;
    private final String tserverLocation;
    private final String tserverSession;

    public TabletLocation(KeyExtent tablet_extent, String tablet_location, String session) {
      checkArgument(tablet_extent != null, "tablet_extent is null");
      checkArgument(tablet_location != null, "tablet_location is null");
      checkArgument(session != null, "session is null");
      this.tablet_extent = tablet_extent;
      this.tserverLocation = interner.intern(tablet_location);
      this.tserverSession = interner.intern(session);
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof TabletLocation) {
        TabletLocation otl = (TabletLocation) o;
        return getExtent().equals(otl.getExtent())
            && getTserverLocation().equals(otl.getTserverLocation())
            && getTserverSession().equals(otl.getTserverSession());
      }
      return false;
    }

    @Override
    public int hashCode() {
      return Objects.hash(getExtent(), tserverLocation, tserverSession);
    }

    @Override
    public String toString() {
      return "(" + getExtent() + "," + getTserverLocation() + "," + getTserverSession() + ")";
    }

    public KeyExtent getExtent() {
      return tablet_extent;
    }

    public String getTserverLocation() {
      return tserverLocation;
    }

    public String getTserverSession() {
      return tserverSession;
    }
  }

  /** Mutations bound for a single tablet server, grouped by destination tablet. */
  public static class TabletServerMutations<T extends Mutation> {
    private Map<KeyExtent,List<T>> mutations;
    private String tserverSession;

    public TabletServerMutations(String tserverSession) {
      this.tserverSession = tserverSession;
      this.mutations = new HashMap<>();
    }

    public void addMutation(KeyExtent ke, T m) {
      List<T> mutList = mutations.computeIfAbsent(ke, k -> new ArrayList<>());
      mutList.add(m);
    }

    public Map<KeyExtent,List<T>> getMutations() {
      return mutations;
    }

    final String getSession() {
      return tserverSession;
    }
  }
}
| 9,867 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.Objects;
import org.apache.accumulo.core.spi.scan.ScanServerAttempt;
/**
 * Immutable record of one attempt to use a scan server: the server contacted and the outcome.
 * The server must be non-null; the result is stored as given.
 */
class ScanServerAttemptImpl implements ScanServerAttempt {

  private final Result result;
  private final String server;

  ScanServerAttemptImpl(Result result, String server) {
    this.server = Objects.requireNonNull(server);
    this.result = result;
  }

  @Override
  public Result getResult() {
    return result;
  }

  @Override
  public String getServer() {
    return server;
  }
}
| 9,868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * Standard {@link TabletLocator} implementation: caches tablet locations read from a parent
 * locator (metadata/root table) in a {@code TreeMap} keyed by tablet end row, and lazily
 * re-resolves extents that have been invalidated. Thread-safe via a read/write lock; cache reads
 * take the read lock, cache fills and invalidation processing take the write lock.
 */
public class TabletLocatorImpl extends TabletLocator {

  private static final Logger log = LoggerFactory.getLogger(TabletLocatorImpl.class);

  // MAX_TEXT represents a TEXT object that is greater than all others. Attempted to use null for
  // this purpose, but there seems to be a bug in TreeMap.tailMap with null. Therefore instead of
  // using null, created MAX_TEXT.
  static final Text MAX_TEXT = new Text();

  // Orders tablet end rows, with the MAX_TEXT sentinel (used for a null end row, i.e. the last
  // tablet) sorting after every real end row. Uses reference comparison for the sentinel.
  static final Comparator<Text> END_ROW_COMPARATOR = (o1, o2) -> {
    if (o1 == o2) {
      return 0;
    }
    if (o1 == MAX_TEXT) {
      return 1;
    }
    if (o2 == MAX_TEXT) {
      return -1;
    }
    return o1.compareTo(o2);
  };

  protected TableId tableId;
  // Locator for the parent table (metadata or root) consulted on cache misses.
  protected TabletLocator parent;
  // Cached tablet locations keyed by tablet end row (MAX_TEXT for the last tablet); guarded by
  // rwLock.
  protected TreeMap<Text,TabletLocation> metaCache = new TreeMap<>(END_ROW_COMPARATOR);
  protected TabletLocationObtainer locationObtainer;
  private final TabletServerLockChecker lockChecker;
  // Metadata row of this table's last tablet: "<tableId><".
  protected Text lastTabletRow;

  // Extents whose cached locations are stale; drained lazily by processInvalidated(). Guarded by
  // rwLock's write lock.
  private final TreeSet<KeyExtent> badExtents = new TreeSet<>();
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  private final Lock rLock = rwLock.readLock();
  private final Lock wLock = rwLock.writeLock();

  /** Strategy for reading tablet location entries out of a (parent) metadata tablet. */
  public interface TabletLocationObtainer {
    /**
     * @return null when unable to read information successfully
     */
    TabletLocations lookupTablet(ClientContext context, TabletLocation src, Text row, Text stopRow,
        TabletLocator parent) throws AccumuloSecurityException, AccumuloException;

    List<TabletLocation> lookupTablets(ClientContext context, String tserver,
        Map<KeyExtent,List<Range>> map, TabletLocator parent)
        throws AccumuloSecurityException, AccumuloException;
  }

  /** Strategy for verifying a tablet server still holds its ZooKeeper lock. */
  public interface TabletServerLockChecker {
    boolean isLockHeld(String tserver, String session);

    void invalidateCache(String server);
  }

  // Per-operation memo of lock-check results so a batch of bins does at most one lockChecker call
  // per (server, session) pair. Instances are confined to a single operation/thread.
  private class LockCheckerSession {

    private final HashSet<Pair<String,String>> okLocks = new HashSet<>();
    private final HashSet<Pair<String,String>> invalidLocks = new HashSet<>();

    // Returns tl if its server's lock is (memoized as) held, otherwise null.
    private TabletLocation checkLock(TabletLocation tl) {
      // the goal of this class is to minimize calls out to lockChecker under that assumption that
      // its a resource synchronized among many threads... want to
      // avoid fine grained synchronization when binning lots of mutations or ranges... remember
      // decisions from the lockChecker in thread local unsynchronized
      // memory

      if (tl == null) {
        return null;
      }

      Pair<String,String> lock = new Pair<>(tl.getTserverLocation(), tl.getTserverSession());

      if (okLocks.contains(lock)) {
        return tl;
      }

      if (invalidLocks.contains(lock)) {
        return null;
      }

      if (lockChecker.isLockHeld(tl.getTserverLocation(), tl.getTserverSession())) {
        okLocks.add(lock);
        return tl;
      }

      if (log.isTraceEnabled()) {
        log.trace("Tablet server {} {} no longer holds its lock", tl.getTserverLocation(),
            tl.getTserverSession());
      }

      invalidLocks.add(lock);

      return null;
    }
  }

  public TabletLocatorImpl(TableId tableId, TabletLocator parent, TabletLocationObtainer tlo,
      TabletServerLockChecker tslc) {
    this.tableId = tableId;
    this.parent = parent;
    this.locationObtainer = tlo;
    this.lockChecker = tslc;

    // Metadata row of the table's last tablet: tableId followed by '<'.
    this.lastTabletRow = new Text(tableId.canonical());
    lastTabletRow.append(new byte[] {'<'}, 0, 1);
  }

  /**
   * Bins mutations by destination tablet server. First tries the cache under the read lock; only
   * mutations that miss (or whose server lock failed) are sorted and resolved under the write
   * lock. Unresolvable mutations end up in {@code failures}.
   */
  @Override
  public <T extends Mutation> void binMutations(ClientContext context, List<T> mutations,
      Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {

    OpTimer timer = null;

    if (log.isTraceEnabled()) {
      log.trace("tid={} Binning {} mutations for table {}", Thread.currentThread().getId(),
          mutations.size(), tableId);
      timer = new OpTimer().start();
    }

    ArrayList<T> notInCache = new ArrayList<>();
    Text row = new Text();

    LockCheckerSession lcSession = new LockCheckerSession();

    rLock.lock();
    try {
      processInvalidated(context, lcSession);

      // for this to be efficient rows need to be in sorted order, but always sorting is slow...
      // therefore only sort the
      // stuff not in the cache.... it is most efficient to pass _locateTablet rows in sorted order

      // For this to be efficient, need to avoid fine grained synchronization and fine grained
      // logging.
      // Therefore methods called by this are not synchronized and should not log.

      for (T mutation : mutations) {
        row.set(mutation.getRow());
        TabletLocation tl = locateTabletInCache(row);
        if (tl == null || !addMutation(binnedMutations, mutation, tl, lcSession)) {
          notInCache.add(mutation);
        }
      }
    } finally {
      rLock.unlock();
    }

    if (!notInCache.isEmpty()) {
      // sort misses so lookups walk the metadata table in row order
      notInCache.sort((o1, o2) -> WritableComparator.compareBytes(o1.getRow(), 0,
          o1.getRow().length, o2.getRow(), 0, o2.getRow().length));

      wLock.lock();
      try {
        boolean failed = false;
        for (T mutation : notInCache) {
          if (failed) {
            // when one table does not return a location, something is probably
            // screwy, go ahead and fail everything.
            failures.add(mutation);
            continue;
          }

          row.set(mutation.getRow());

          TabletLocation tl = _locateTablet(context, row, false, false, false, lcSession);

          if (tl == null || !addMutation(binnedMutations, mutation, tl, lcSession)) {
            failures.add(mutation);
            failed = true;
          }
        }
      } finally {
        wLock.unlock();
      }
    }

    if (timer != null) {
      timer.stop();
      log.trace("tid={} Binned {} mutations for table {} to {} tservers in {}",
          Thread.currentThread().getId(), mutations.size(), tableId, binnedMutations.size(),
          String.format("%.3f secs", timer.scale(SECONDS)));
    }
  }

  // Adds the mutation under tl's server/session; returns false if the server's lock is not held
  // or the cached session differs from an already-bound session for the same server.
  private <T extends Mutation> boolean addMutation(
      Map<String,TabletServerMutations<T>> binnedMutations, T mutation, TabletLocation tl,
      LockCheckerSession lcSession) {
    TabletServerMutations<T> tsm = binnedMutations.get(tl.getTserverLocation());

    if (tsm == null) {
      // do lock check once per tserver here to make binning faster
      boolean lockHeld = lcSession.checkLock(tl) != null;
      if (lockHeld) {
        tsm = new TabletServerMutations<>(tl.getTserverSession());
        binnedMutations.put(tl.getTserverLocation(), tsm);
      } else {
        return false;
      }
    }

    // its possible the same tserver could be listed with different sessions
    if (tsm.getSession().equals(tl.getTserverSession())) {
      tsm.addMutation(tl.getExtent(), mutation);
      return true;
    }

    return false;
  }

  // Returns true iff each extent's prev end row chains exactly onto the previous extent, i.e. the
  // list covers a contiguous, non-overlapping span. Assumes the list is non-empty.
  static boolean isContiguous(List<TabletLocation> tabletLocations) {

    Iterator<TabletLocation> iter = tabletLocations.iterator();
    KeyExtent prevExtent = iter.next().getExtent();

    while (iter.hasNext()) {
      KeyExtent currExtent = iter.next().getExtent();

      if (!currExtent.isPreviousExtent(prevExtent)) {
        return false;
      }

      prevExtent = currExtent;
    }

    return true;
  }

  // Bins ranges using either the cache (useCache=true, read lock held by caller) or metadata
  // lookups (write lock held by caller). Returns the ranges that could not be fully binned.
  private List<Range> binRanges(ClientContext context, List<Range> ranges,
      Map<String,Map<KeyExtent,List<Range>>> binnedRanges, boolean useCache,
      LockCheckerSession lcSession)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    List<Range> failures = new ArrayList<>();
    List<TabletLocation> tabletLocations = new ArrayList<>();

    boolean lookupFailed = false;

    l1: for (Range range : ranges) {

      tabletLocations.clear();

      Text startRow;

      if (range.getStartKey() != null) {
        startRow = range.getStartKey().getRow();
      } else {
        startRow = new Text();
      }

      TabletLocation tl = null;

      if (useCache) {
        tl = lcSession.checkLock(locateTabletInCache(startRow));
      } else if (!lookupFailed) {
        tl = _locateTablet(context, startRow, false, false, false, lcSession);
      }

      if (tl == null) {
        failures.add(range);
        if (!useCache) {
          lookupFailed = true;
        }
        continue;
      }

      tabletLocations.add(tl);

      // Walk successive tablets until the range's end key is covered.
      while (tl.getExtent().endRow() != null
          && !range.afterEndKey(new Key(tl.getExtent().endRow()).followingKey(PartialKey.ROW))) {
        if (useCache) {
          Text row = new Text(tl.getExtent().endRow());
          row.append(new byte[] {0}, 0, 1);
          tl = lcSession.checkLock(locateTabletInCache(row));
        } else {
          tl = _locateTablet(context, tl.getExtent().endRow(), true, false, false, lcSession);
        }

        if (tl == null) {
          failures.add(range);
          if (!useCache) {
            lookupFailed = true;
          }
          continue l1;
        }
        tabletLocations.add(tl);
      }

      // Ensure the extents found are non overlapping and have no holes. When reading some extents
      // from the cache and other from the metadata table in the loop above we may end up with
      // non-contiguous extents. This can happen when a subset of exents are placed in the cache and
      // then after that merges and splits happen.
      if (isContiguous(tabletLocations)) {
        for (TabletLocation tl2 : tabletLocations) {
          TabletLocatorImpl.addRange(binnedRanges, tl2.getTserverLocation(), tl2.getExtent(),
              range);
        }
      } else {
        failures.add(range);
        if (!useCache) {
          lookupFailed = true;
        }
      }

    }

    return failures;
  }

  /**
   * Bins ranges by tablet server and tablet: first a cache-only pass under the read lock, then a
   * metadata-lookup pass under the write lock for the sorted failures.
   *
   * @return ranges that still could not be binned
   */
  @Override
  public List<Range> binRanges(ClientContext context, List<Range> ranges,
      Map<String,Map<KeyExtent,List<Range>>> binnedRanges)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {

    /*
     * For this to be efficient, need to avoid fine grained synchronization and fine grained
     * logging. Therefore methods called by this are not synchronized and should not log.
     */

    OpTimer timer = null;

    if (log.isTraceEnabled()) {
      log.trace("tid={} Binning {} ranges for table {}", Thread.currentThread().getId(),
          ranges.size(), tableId);
      timer = new OpTimer().start();
    }

    LockCheckerSession lcSession = new LockCheckerSession();

    List<Range> failures;
    rLock.lock();
    try {
      processInvalidated(context, lcSession);

      // for this to be optimal, need to look ranges up in sorted order when
      // ranges are not present in cache... however do not want to always
      // sort ranges... therefore try binning ranges using only the cache
      // and sort whatever fails and retry

      failures = binRanges(context, ranges, binnedRanges, true, lcSession);
    } finally {
      rLock.unlock();
    }

    if (!failures.isEmpty()) {
      // sort failures by range start key
      Collections.sort(failures);

      // try lookups again
      wLock.lock();
      try {
        failures = binRanges(context, failures, binnedRanges, false, lcSession);
      } finally {
        wLock.unlock();
      }
    }

    if (timer != null) {
      timer.stop();
      log.trace("tid={} Binned {} ranges for table {} to {} tservers in {}",
          Thread.currentThread().getId(), ranges.size(), tableId, binnedRanges.size(),
          String.format("%.3f secs", timer.scale(SECONDS)));
    }

    return failures;
  }

  /** Marks the extent stale; its location is reloaded on the next lookup. */
  @Override
  public void invalidateCache(KeyExtent failedExtent) {
    wLock.lock();
    try {
      badExtents.add(failedExtent);
    } finally {
      wLock.unlock();
    }

    if (log.isTraceEnabled()) {
      log.trace("Invalidated extent={}", failedExtent);
    }
  }

  /** Marks all the given extents stale; their locations are reloaded on the next lookup. */
  @Override
  public void invalidateCache(Collection<KeyExtent> keySet) {
    wLock.lock();
    try {
      badExtents.addAll(keySet);
    } finally {
      wLock.unlock();
    }

    if (log.isTraceEnabled()) {
      log.trace("Invalidated {} cache entries for table {}", keySet.size(), tableId);
    }
  }

  /**
   * Marks every cached entry pointing at the given server stale, and invalidates the server in
   * the lock checker's cache as well.
   */
  @Override
  public void invalidateCache(ClientContext context, String server) {
    int invalidatedCount = 0;

    wLock.lock();
    try {
      for (TabletLocation cacheEntry : metaCache.values()) {
        if (cacheEntry.getTserverLocation().equals(server)) {
          badExtents.add(cacheEntry.getExtent());
          invalidatedCount++;
        }
      }
    } finally {
      wLock.unlock();
    }

    lockChecker.invalidateCache(server);

    if (log.isTraceEnabled()) {
      log.trace("invalidated {} cache entries  table={} server={}", invalidatedCount, tableId,
          server);
    }
  }

  /** Drops every cached location for this table. */
  @Override
  public void invalidateCache() {
    int invalidatedCount;
    wLock.lock();
    try {
      invalidatedCount = metaCache.size();
      metaCache.clear();
    } finally {
      wLock.unlock();
    }

    if (log.isTraceEnabled()) {
      log.trace("invalidated all {} cache entries for table={}", invalidatedCount, tableId);
    }
  }

  /**
   * Locates the tablet containing {@code row} (or the row after it when {@code skipRow}). When
   * {@code retry} is true, loops with a 100ms sleep until a location is found.
   */
  @Override
  public TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow,
      boolean retry) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {

    OpTimer timer = null;

    if (log.isTraceEnabled()) {
      log.trace("tid={} Locating tablet  table={} row={} skipRow={} retry={}",
          Thread.currentThread().getId(), tableId, TextUtil.truncate(row), skipRow, retry);
      timer = new OpTimer().start();
    }

    while (true) {

      LockCheckerSession lcSession = new LockCheckerSession();
      TabletLocation tl = _locateTablet(context, row, skipRow, retry, true, lcSession);

      if (retry && tl == null) {
        sleepUninterruptibly(100, MILLISECONDS);
        if (log.isTraceEnabled()) {
          log.trace("Failed to locate tablet containing row {} in table {}, will retry...",
              TextUtil.truncate(row), tableId);
        }
        continue;
      }

      if (timer != null) {
        timer.stop();
        log.trace("tid={} Located tablet {} at {} in {}", Thread.currentThread().getId(),
            (tl == null ? "null" : tl.getExtent()), (tl == null ? "null" : tl.getTserverLocation()),
            String.format("%.3f secs", timer.scale(SECONDS)));
      }

      return tl;
    }
  }

  // Reads location entries for the tablet containing `row` from the parent metadata tablet and
  // caches them. Caller must hold the write lock.
  private void lookupTabletLocation(ClientContext context, Text row, boolean retry,
      LockCheckerSession lcSession)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Text metadataRow = new Text(tableId.canonical());
    metadataRow.append(new byte[] {';'}, 0, 1);
    metadataRow.append(row.getBytes(), 0, row.getLength());
    TabletLocation ptl = parent.locateTablet(context, metadataRow, false, retry);

    if (ptl != null) {
      TabletLocations locations =
          locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow, parent);
      while (locations != null && locations.getLocations().isEmpty()
          && locations.getLocationless().isEmpty()) {
        // try the next tablet, the current tablet does not have any tablets that overlap the row
        Text er = ptl.getExtent().endRow();
        if (er != null && er.compareTo(lastTabletRow) < 0) {
          // System.out.println("er "+er+" lastRow "+lastTabletRow);
          ptl = parent.locateTablet(context, er, true, retry);
          if (ptl != null) {
            locations =
                locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow, parent);
          } else {
            break;
          }
        } else {
          break;
        }
      }

      if (locations == null) {
        return;
      }

      // cannot assume the list contains contiguous key extents... so it is probably
      // best to deal with each extent individually

      Text lastEndRow = null;
      for (TabletLocation tabletLocation : locations.getLocations()) {

        KeyExtent ke = tabletLocation.getExtent();
        TabletLocation locToCache;

        // create new location if current prevEndRow == endRow
        if ((lastEndRow != null) && (ke.prevEndRow() != null)
            && ke.prevEndRow().equals(lastEndRow)) {
          locToCache = new TabletLocation(new KeyExtent(ke.tableId(), ke.endRow(), lastEndRow),
              tabletLocation.getTserverLocation(), tabletLocation.getTserverSession());
        } else {
          locToCache = tabletLocation;
        }

        // save endRow for next iteration
        lastEndRow = locToCache.getExtent().endRow();

        updateCache(locToCache, lcSession);
      }
    }

  }

  // Inserts a location into the cache after evicting overlapping entries; skipped if the server's
  // lock is not held. Caller must hold the write lock.
  private void updateCache(TabletLocation tabletLocation, LockCheckerSession lcSession) {
    if (!tabletLocation.getExtent().tableId().equals(tableId)) {
      // sanity check
      throw new IllegalStateException(
          "Unexpected extent returned " + tableId + "  " + tabletLocation.getExtent());
    }

    if (tabletLocation.getTserverLocation() == null) {
      // sanity check
      throw new IllegalStateException(
          "Cannot add null locations to cache " + tableId + "  " + tabletLocation.getExtent());
    }

    // clear out any overlapping extents in cache
    removeOverlapping(metaCache, tabletLocation.getExtent());

    // do not add to cache unless lock is held
    if (lcSession.checkLock(tabletLocation) == null) {
      return;
    }

    // add it to cache
    Text er = tabletLocation.getExtent().endRow();
    if (er == null) {
      er = MAX_TEXT;
    }
    metaCache.put(er, tabletLocation);

    if (!badExtents.isEmpty()) {
      removeOverlapping(badExtents, tabletLocation.getExtent());
    }
  }

  // Removes every cached entry whose extent overlaps nke. Relies on the cache being keyed by end
  // row: iteration starts at the first entry that could overlap and stops at the first that
  // cannot.
  static void removeOverlapping(TreeMap<Text,TabletLocation> metaCache, KeyExtent nke) {
    Iterator<Entry<Text,TabletLocation>> iter;

    if (nke.prevEndRow() == null) {
      iter = metaCache.entrySet().iterator();
    } else {
      Text row = rowAfterPrevRow(nke);
      SortedMap<Text,TabletLocation> tailMap = metaCache.tailMap(row);
      iter = tailMap.entrySet().iterator();
    }

    while (iter.hasNext()) {
      Entry<Text,TabletLocation> entry = iter.next();

      KeyExtent ke = entry.getValue().getExtent();

      if (stopRemoving(nke, ke)) {
        break;
      }

      iter.remove();
    }
  }

  // True once ke starts at or after nke's end, i.e. no further entries can overlap nke.
  private static boolean stopRemoving(KeyExtent nke, KeyExtent ke) {
    return ke.prevEndRow() != null && nke.endRow() != null
        && ke.prevEndRow().compareTo(nke.endRow()) >= 0;
  }

  // First row strictly inside nke: its prev end row followed by a zero byte.
  private static Text rowAfterPrevRow(KeyExtent nke) {
    Text row = new Text(nke.prevEndRow());
    row.append(new byte[] {0}, 0, 1);
    return row;
  }

  // Removes from the set every extent that overlaps nke.
  static void removeOverlapping(TreeSet<KeyExtent> extents, KeyExtent nke) {
    for (KeyExtent overlapping : KeyExtent.findOverlapping(nke, extents)) {
      extents.remove(overlapping);
    }
  }

  // Cache lookup only: returns the cached location whose extent contains `row`, or null. Caller
  // must hold at least the read lock.
  private TabletLocation locateTabletInCache(Text row) {

    Entry<Text,TabletLocation> entry = metaCache.ceilingEntry(row);

    if (entry != null) {
      KeyExtent ke = entry.getValue().getExtent();
      if (ke.prevEndRow() == null || ke.prevEndRow().compareTo(row) < 0) {
        return entry.getValue();
      }
    }
    return null;
  }

  // Core lookup: tries the cache (read lock when `lock`), then falls back to a metadata lookup
  // (write lock when `lock`). When `lock` is false the caller already holds an appropriate lock.
  protected TabletLocation _locateTablet(ClientContext context, Text row, boolean skipRow,
      boolean retry, boolean lock, LockCheckerSession lcSession)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {

    if (skipRow) {
      // look up the tablet containing the row immediately after `row`
      row = new Text(row);
      row.append(new byte[] {0}, 0, 1);
    }

    TabletLocation tl;

    if (lock) {
      rLock.lock();
      try {
        tl = processInvalidatedAndCheckLock(context, lcSession, row);
      } finally {
        rLock.unlock();
      }
    } else {
      tl = processInvalidatedAndCheckLock(context, lcSession, row);
    }

    if (tl == null) {
      // not in cache, so obtain info
      if (lock) {
        wLock.lock();
        try {
          tl = lookupTabletLocationAndCheckLock(context, row, retry, lcSession);
        } finally {
          wLock.unlock();
        }
      } else {
        tl = lookupTabletLocationAndCheckLock(context, row, retry, lcSession);
      }
    }

    return tl;
  }

  // Metadata lookup followed by a cache read, filtered through the lock checker.
  private TabletLocation lookupTabletLocationAndCheckLock(ClientContext context, Text row,
      boolean retry, LockCheckerSession lcSession)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    lookupTabletLocation(context, row, retry, lcSession);
    return lcSession.checkLock(locateTabletInCache(row));
  }

  // Drains any pending invalidations, then reads the cache, filtered through the lock checker.
  private TabletLocation processInvalidatedAndCheckLock(ClientContext context,
      LockCheckerSession lcSession, Text row)
      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
    processInvalidated(context, lcSession);
    return lcSession.checkLock(locateTabletInCache(row));
  }

  // Re-resolves all extents in badExtents. If the caller only holds the read lock, temporarily
  // trades it for the write lock (and trades back in the finally block) — hence the FindBugs
  // suppression.
  @SuppressFBWarnings(value = {"UL_UNRELEASED_LOCK", "UL_UNRELEASED_LOCK_EXCEPTION_PATH"},
      justification = "locking is confusing, but probably correct")
  private void processInvalidated(ClientContext context, LockCheckerSession lcSession)
      throws AccumuloSecurityException, AccumuloException, TableNotFoundException {

    if (badExtents.isEmpty()) {
      return;
    }

    final boolean writeLockHeld = rwLock.isWriteLockedByCurrentThread();
    try {
      if (!writeLockHeld) {
        rLock.unlock();
        wLock.lock();
        // recheck: another thread may have drained badExtents while we waited for the write lock
        if (badExtents.isEmpty()) {
          return;
        }
      }

      List<Range> lookups = new ArrayList<>(badExtents.size());

      for (KeyExtent be : badExtents) {
        lookups.add(be.toMetaRange());
        removeOverlapping(metaCache, be);
      }

      lookups = Range.mergeOverlapping(lookups);

      Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();

      parent.binRanges(context, lookups, binnedRanges);

      // randomize server order
      ArrayList<String> tabletServers = new ArrayList<>(binnedRanges.keySet());
      Collections.shuffle(tabletServers);

      for (String tserver : tabletServers) {
        List<TabletLocation> locations =
            locationObtainer.lookupTablets(context, tserver, binnedRanges.get(tserver), parent);

        for (TabletLocation tabletLocation : locations) {
          updateCache(tabletLocation, lcSession);
        }
      }
    } finally {
      if (!writeLockHeld) {
        rLock.lock();
        wLock.unlock();
      }
    }
  }

  // Appends `range` under binnedRanges[location][ke], creating nested maps/lists as needed.
  protected static void addRange(Map<String,Map<KeyExtent,List<Range>>> binnedRanges,
      String location, KeyExtent ke, Range range) {
    binnedRanges.computeIfAbsent(location, k -> new HashMap<>())
        .computeIfAbsent(ke, k -> new ArrayList<>()).add(range);
  }

}
| 9,869 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/AcceptableThriftTableOperationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.fate.AcceptableException;
/**
* Concrete implementation of {@link AcceptableException} for table operations.
*/
public class AcceptableThriftTableOperationException extends ThriftTableOperationException
    implements AcceptableException {
  private static final long serialVersionUID = 1L;
  /**
   * @param tableId id of the table the operation was against
   * @param tableName name of the table the operation was against
   * @param op the table operation that failed
   * @param type the category of the failure
   * @param description human-readable detail message
   */
  public AcceptableThriftTableOperationException(String tableId, String tableName,
      TableOperation op, TableOperationExceptionType type, String description) {
    super(tableId, tableName, op, type, description);
  }
}
| 9,870 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/InstanceOperationsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import static java.util.stream.Collectors.toList;
import static org.apache.accumulo.core.rpc.ThriftUtil.createClient;
import static org.apache.accumulo.core.rpc.ThriftUtil.createTransport;
import static org.apache.accumulo.core.rpc.ThriftUtil.getClient;
import static org.apache.accumulo.core.rpc.ThriftUtil.returnClient;
import java.util.ArrayList;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.function.Consumer;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.ActiveCompaction;
import org.apache.accumulo.core.client.admin.ActiveCompaction.CompactionHost;
import org.apache.accumulo.core.client.admin.ActiveScan;
import org.apache.accumulo.core.client.admin.InstanceOperations;
import org.apache.accumulo.core.clientImpl.thrift.ConfigurationType;
import org.apache.accumulo.core.clientImpl.thrift.TVersionedProperties;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.conf.DeprecatedPropertyUtil;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.tabletscan.thrift.TabletScanClientService;
import org.apache.accumulo.core.tabletserver.thrift.TabletServerClientService.Client;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.AddressUtil;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
import org.apache.accumulo.core.util.Retry;
import org.apache.accumulo.core.util.compaction.ExternalCompactionUtil;
import org.apache.thrift.TException;
import org.apache.thrift.transport.TTransport;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
/**
* Provides a class for administering the accumulo instance
*/
public class InstanceOperationsImpl implements InstanceOperations {
  // Client connection state: RPC credentials, thrift client helpers, ZooKeeper cache.
  private final ClientContext context;
  public InstanceOperationsImpl(ClientContext context) {
    checkArgument(context != null, "context is null");
    this.context = context;
  }
  /** Sets a single system property via the manager, warning when the name is deprecated. */
  @Override
  public void setProperty(final String property, final String value)
      throws AccumuloException, AccumuloSecurityException, IllegalArgumentException {
    checkArgument(property != null, "property is null");
    checkArgument(value != null, "value is null");
    DeprecatedPropertyUtil.getReplacementName(property, (log, replacement) -> {
      // force a warning on the client side, but send the name the user used to the server-side
      // to trigger a warning in the server logs, and to handle it there
      log.warn("{} was deprecated and will be removed in a future release;"
          + " setting its replacement {} instead", property, replacement);
    });
    ThriftClientTypes.MANAGER.executeVoid(context, client -> client
        .setSystemProperty(TraceUtil.traceInfo(), context.rpcCreds(), property, value));
    // locality group config spans multiple properties; warn if this change left it inconsistent
    checkLocalityGroups(property);
  }
  // One optimistic attempt at a versioned read-modify-write of the system properties; the
  // server side rejects it (ConcurrentModificationException) when the version changed underneath.
  private Map<String,String> tryToModifyProperties(final Consumer<Map<String,String>> mapMutator)
      throws AccumuloException, AccumuloSecurityException, IllegalArgumentException {
    checkArgument(mapMutator != null, "mapMutator is null");
    final TVersionedProperties vProperties = ThriftClientTypes.CLIENT.execute(context,
        client -> client.getVersionedSystemProperties(TraceUtil.traceInfo(), context.rpcCreds()));
    mapMutator.accept(vProperties.getProperties());
    // A reference to the map was passed to the user, maybe they still have the reference and are
    // modifying it. Buggy Accumulo code could attempt to make modifications to the map after this
    // point. Because of these potential issues, create an immutable snapshot of the map so that
    // from here on the code is assured to always be dealing with the same map.
    vProperties.setProperties(Map.copyOf(vProperties.getProperties()));
    for (Map.Entry<String,String> entry : vProperties.getProperties().entrySet()) {
      final String property = Objects.requireNonNull(entry.getKey(), "property key is null");
      DeprecatedPropertyUtil.getReplacementName(property, (log, replacement) -> {
        // force a warning on the client side, but send the name the user used to the
        // server-side
        // to trigger a warning in the server logs, and to handle it there
        log.warn("{} was deprecated and will be removed in a future release;"
            + " setting its replacement {} instead", property, replacement);
      });
      checkLocalityGroups(property);
    }
    // Send to server
    ThriftClientTypes.MANAGER.executeVoid(context, client -> client
        .modifySystemProperties(TraceUtil.traceInfo(), context.rpcCreds(), vProperties));
    return vProperties.getProperties();
  }
  /**
   * Applies the caller's mutation under optimistic concurrency, retrying with backoff for as
   * long as concurrent modifications keep winning the race.
   */
  @Override
  public Map<String,String> modifyProperties(final Consumer<Map<String,String>> mapMutator)
      throws AccumuloException, AccumuloSecurityException, IllegalArgumentException {
    var log = LoggerFactory.getLogger(InstanceOperationsImpl.class);
    Retry retry =
        Retry.builder().infiniteRetries().retryAfter(25, MILLISECONDS).incrementBy(25, MILLISECONDS)
            .maxWait(30, SECONDS).backOffFactor(1.5).logInterval(3, MINUTES).createRetry();
    while (true) {
      try {
        var props = tryToModifyProperties(mapMutator);
        retry.logCompletion(log, "Modifying instance properties");
        return props;
      } catch (ConcurrentModificationException cme) {
        try {
          retry.logRetry(log,
              "Unable to modify instance properties for because of concurrent modification");
          retry.waitForNextAttempt(log, "Modify instance properties");
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      } finally {
        retry.useRetry();
      }
    }
  }
  /** Removes a system property via the manager, warning when the name is deprecated. */
  @Override
  public void removeProperty(final String property)
      throws AccumuloException, AccumuloSecurityException {
    checkArgument(property != null, "property is null");
    DeprecatedPropertyUtil.getReplacementName(property, (log, replacement) -> {
      // force a warning on the client side, but send the name the user used to the server-side
      // to trigger a warning in the server logs, and to handle it there
      log.warn("{} was deprecated and will be removed in a future release; assuming user meant"
          + " its replacement {} and will remove that instead", property, replacement);
    });
    ThriftClientTypes.MANAGER.executeVoid(context,
        client -> client.removeSystemProperty(TraceUtil.traceInfo(), context.rpcCreds(), property));
    checkLocalityGroups(property);
  }
  // If the changed property affects locality groups, validate the resulting configuration and
  // log a warning on inconsistency; the property change itself is kept either way.
  private void checkLocalityGroups(String propChanged)
      throws AccumuloSecurityException, AccumuloException {
    if (LocalityGroupUtil.isLocalityGroupProperty(propChanged)) {
      try {
        LocalityGroupUtil.checkLocalityGroups(getSystemConfiguration());
      } catch (LocalityGroupConfigurationError | RuntimeException e) {
        LoggerFactory.getLogger(this.getClass()).warn("Changing '" + propChanged
            + "' resulted in bad locality group config. This may be a transient situation since "
            + "the config spreads over multiple properties. Setting properties in a different "
            + "order may help. Even though this warning was displayed, the property was updated. "
            + "Please check your config to ensure consistency.", e);
      }
    }
  }
  @Override
  public Map<String,String> getSystemConfiguration()
      throws AccumuloException, AccumuloSecurityException {
    return ThriftClientTypes.CLIENT.execute(context, client -> client
        .getConfiguration(TraceUtil.traceInfo(), context.rpcCreds(), ConfigurationType.CURRENT));
  }
  @Override
  public Map<String,String> getSiteConfiguration()
      throws AccumuloException, AccumuloSecurityException {
    return ThriftClientTypes.CLIENT.execute(context, client -> client
        .getConfiguration(TraceUtil.traceInfo(), context.rpcCreds(), ConfigurationType.SITE));
  }
  @Override
  public List<String> getManagerLocations() {
    return context.getManagerLocations();
  }
  @Override
  public Set<String> getScanServers() {
    return Set.copyOf(context.getScanServers().keySet());
  }
  /**
   * Lists live tablet servers from ZooKeeper. A server is reported when its node has at least
   * one child and the data of the lowest-sorted child is not "manager".
   */
  @Override
  public List<String> getTabletServers() {
    ZooCache cache = context.getZooCache();
    String path = context.getZooKeeperRoot() + Constants.ZTSERVERS;
    List<String> results = new ArrayList<>();
    for (String candidate : cache.getChildren(path)) {
      var children = cache.getChildren(path + "/" + candidate);
      if (children != null && !children.isEmpty()) {
        var copy = new ArrayList<>(children);
        Collections.sort(copy);
        // examine the first (lowest) child node to determine the holder
        var data = cache.get(path + "/" + candidate + "/" + copy.get(0));
        if (data != null && !"manager".equals(new String(data, UTF_8))) {
          results.add(candidate);
        }
      }
    }
    return results;
  }
  /** Fetches the active scans from one tablet server via a direct thrift call. */
  @Override
  public List<ActiveScan> getActiveScans(String tserver)
      throws AccumuloException, AccumuloSecurityException {
    final var parsedTserver = HostAndPort.fromString(tserver);
    TabletScanClientService.Client client = null;
    try {
      client = getClient(ThriftClientTypes.TABLET_SCAN, parsedTserver, context);
      List<ActiveScan> as = new ArrayList<>();
      for (var activeScan : client.getActiveScans(TraceUtil.traceInfo(), context.rpcCreds())) {
        try {
          as.add(new ActiveScanImpl(context, activeScan));
        } catch (TableNotFoundException e) {
          throw new AccumuloException(e);
        }
      }
      return as;
    } catch (ThriftSecurityException e) {
      throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (TException e) {
      throw new AccumuloException(e);
    } finally {
      // always return the pooled client, even on error
      if (client != null) {
        returnClient(client, context);
      }
    }
  }
  @Override
  public boolean testClassLoad(final String className, final String asTypeName)
      throws AccumuloException, AccumuloSecurityException {
    return ThriftClientTypes.CLIENT.execute(context, client -> client
        .checkClass(TraceUtil.traceInfo(), context.rpcCreds(), className, asTypeName));
  }
  /** Fetches the active compactions from one tablet server via a direct thrift call. */
  @Override
  public List<ActiveCompaction> getActiveCompactions(String tserver)
      throws AccumuloException, AccumuloSecurityException {
    final var parsedTserver = HostAndPort.fromString(tserver);
    Client client = null;
    try {
      client = getClient(ThriftClientTypes.TABLET_SERVER, parsedTserver, context);
      List<ActiveCompaction> as = new ArrayList<>();
      for (var tac : client.getActiveCompactions(TraceUtil.traceInfo(), context.rpcCreds())) {
        as.add(new ActiveCompactionImpl(context, tac, parsedTserver, CompactionHost.Type.TSERVER));
      }
      return as;
    } catch (ThriftSecurityException e) {
      throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (TException e) {
      throw new AccumuloException(e);
    } finally {
      if (client != null) {
        returnClient(client, context);
      }
    }
  }
  /**
   * Collects active compactions from all tablet servers and all external compactors in parallel
   * using a temporary fixed thread pool.
   */
  @Override
  public List<ActiveCompaction> getActiveCompactions()
      throws AccumuloException, AccumuloSecurityException {
    Map<String,List<HostAndPort>> compactors = ExternalCompactionUtil.getCompactorAddrs(context);
    List<String> tservers = getTabletServers();
    // scale the pool with the number of servers, clamped to [4, 256]
    int numThreads = Math.max(4, Math.min((tservers.size() + compactors.size()) / 10, 256));
    var executorService =
        context.threadPools().createFixedThreadPool(numThreads, "getactivecompactions", false);
    try {
      List<Future<List<ActiveCompaction>>> futures = new ArrayList<>();
      for (String tserver : tservers) {
        futures.add(executorService.submit(() -> getActiveCompactions(tserver)));
      }
      compactors.values().forEach(compactorList -> {
        for (HostAndPort compactorAddr : compactorList) {
          Callable<List<ActiveCompaction>> task =
              () -> ExternalCompactionUtil.getActiveCompaction(compactorAddr, context).stream()
                  .map(tac -> new ActiveCompactionImpl(context, tac, compactorAddr,
                      CompactionHost.Type.COMPACTOR))
                  .collect(toList());
          futures.add(executorService.submit(task));
        }
      });
      List<ActiveCompaction> ret = new ArrayList<>();
      for (Future<List<ActiveCompaction>> future : futures) {
        try {
          ret.addAll(future.get());
        } catch (InterruptedException | ExecutionException e) {
          // unwrap a security failure so callers see the proper exception type
          if (e.getCause() instanceof ThriftSecurityException) {
            ThriftSecurityException tse = (ThriftSecurityException) e.getCause();
            throw new AccumuloSecurityException(tse.user, tse.code, e);
          }
          throw new AccumuloException(e);
        }
      }
      return ret;
    } finally {
      executorService.shutdown();
    }
  }
  /** Verifies a tablet server is reachable by requesting its status over a fresh transport. */
  @Override
  public void ping(String tserver) throws AccumuloException {
    try (
        TTransport transport = createTransport(AddressUtil.parseAddress(tserver, false), context)) {
      Client client = createClient(ThriftClientTypes.TABLET_SERVER, transport);
      client.getTabletServerStatus(TraceUtil.traceInfo(), context.rpcCreds());
    } catch (TException e) {
      throw new AccumuloException(e);
    }
  }
  @Override
  public void waitForBalance() throws AccumuloException {
    try {
      ThriftClientTypes.MANAGER.executeVoid(context,
          client -> client.waitForBalance(TraceUtil.traceInfo()));
    } catch (AccumuloSecurityException ex) {
      // should never happen
      throw new IllegalStateException("Unexpected exception thrown", ex);
    }
  }
  /**
   * Given a zooCache and instanceId, look up the instance name.
   *
   * @return the instance name, or null if no instance with the given id exists
   */
  public static String lookupInstanceName(ZooCache zooCache, InstanceId instanceId) {
    checkArgument(zooCache != null, "zooCache is null");
    checkArgument(instanceId != null, "instanceId is null");
    for (String name : zooCache.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
      var bytes = zooCache.get(Constants.ZROOT + Constants.ZINSTANCES + "/" + name);
      InstanceId iid = InstanceId.of(new String(bytes, UTF_8));
      if (iid.equals(instanceId)) {
        return name;
      }
    }
    return null;
  }
  @Override
  public InstanceId getInstanceId() {
    return context.getInstanceID();
  }
}
| 9,871 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/RootTabletLocator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOCATION;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.TabletLocatorImpl.TabletServerLockChecker;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.Ample.ReadConsistency;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.Location;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RootTabletLocator extends TabletLocator {
  // Cache the logger instead of performing a LoggerFactory lookup on every root
  // tablet location request (SLF4J loggers are thread-safe and intended to be static).
  private static final Logger log = LoggerFactory.getLogger(RootTabletLocator.class);
  private final TabletServerLockChecker lockChecker;
  RootTabletLocator(TabletServerLockChecker lockChecker) {
    this.lockChecker = lockChecker;
  }
  /**
   * Bins all mutations to the single root tablet when its location is known and its server's
   * lock is held; otherwise reports every mutation as a failure.
   */
  @Override
  public <T extends Mutation> void binMutations(ClientContext context, List<T> mutations,
      Map<String,TabletServerMutations<T>> binnedMutations, List<T> failures) {
    TabletLocation rootTabletLocation = getRootTabletLocation(context);
    if (rootTabletLocation != null) {
      var tsm = new TabletServerMutations<T>(rootTabletLocation.getTserverSession());
      for (T mutation : mutations) {
        tsm.addMutation(RootTable.EXTENT, mutation);
      }
      binnedMutations.put(rootTabletLocation.getTserverLocation(), tsm);
    } else {
      failures.addAll(mutations);
    }
  }
  /**
   * Bins all ranges to the root tablet when its location is known; otherwise returns the full
   * list of ranges as failures.
   */
  @Override
  public List<Range> binRanges(ClientContext context, List<Range> ranges,
      Map<String,Map<KeyExtent,List<Range>>> binnedRanges) {
    TabletLocation rootTabletLocation = getRootTabletLocation(context);
    if (rootTabletLocation != null) {
      for (Range range : ranges) {
        TabletLocatorImpl.addRange(binnedRanges, rootTabletLocation.getTserverLocation(),
            RootTable.EXTENT, range);
      }
      return Collections.emptyList();
    }
    return ranges;
  }
  // The root tablet location is read on demand, so there is no extent-level cache to invalidate.
  @Override
  public void invalidateCache(KeyExtent failedExtent) {}
  @Override
  public void invalidateCache(Collection<KeyExtent> keySet) {}
  /** Clears the cached ZooKeeper state for the given tablet server. */
  @Override
  public void invalidateCache(ClientContext context, String server) {
    ZooCache zooCache = context.getZooCache();
    String root = context.getZooKeeperRoot() + Constants.ZTSERVERS;
    zooCache.clear(root + "/" + server);
  }
  @Override
  public void invalidateCache() {}
  /**
   * Reads the root tablet's location and verifies the hosting server still holds its lock.
   *
   * @return the location, or null when the tablet has no CURRENT location or the server's lock
   *         is not held
   */
  protected TabletLocation getRootTabletLocation(ClientContext context) {
    OpTimer timer = null;
    if (log.isTraceEnabled()) {
      log.trace("tid={} Looking up root tablet location in zookeeper.",
          Thread.currentThread().getId());
      timer = new OpTimer().start();
    }
    Location loc = context.getAmple()
        .readTablet(RootTable.EXTENT, ReadConsistency.EVENTUAL, LOCATION).getLocation();
    if (timer != null) {
      timer.stop();
      log.trace("tid={} Found root tablet at {} in {}", Thread.currentThread().getId(), loc,
          String.format("%.3f secs", timer.scale(SECONDS)));
    }
    if (loc == null || loc.getType() != LocationType.CURRENT) {
      return null;
    }
    String server = loc.getHostPort();
    if (lockChecker.isLockHeld(server, loc.getSession())) {
      return new TabletLocation(RootTable.EXTENT, server, loc.getSession());
    } else {
      return null;
    }
  }
  /**
   * Locates the root tablet. When {@code retry} is true this blocks, polling every 500ms, until
   * a valid location is available.
   */
  @Override
  public TabletLocation locateTablet(ClientContext context, Text row, boolean skipRow,
      boolean retry) {
    TabletLocation location = getRootTabletLocation(context);
    // Always retry when finding the root tablet
    while (retry && location == null) {
      sleepUninterruptibly(500, MILLISECONDS);
      location = getRootTabletLocation(context);
    }
    return location;
  }
}
| 9,872 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/TabletServerBatchReaderIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.io.IOException;
import java.time.Duration;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.client.ScannerBase.ConsistencyLevel;
import org.apache.accumulo.core.client.TableDeletedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TimedOutException;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.dataImpl.thrift.InitialMultiScan;
import org.apache.accumulo.core.dataImpl.thrift.MultiScanResult;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TKeyValue;
import org.apache.accumulo.core.dataImpl.thrift.TRange;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.spi.scan.ScanServerAttempt;
import org.apache.accumulo.core.spi.scan.ScanServerSelections;
import org.apache.accumulo.core.spi.scan.ScanServerSelector;
import org.apache.accumulo.core.tabletscan.thrift.ScanServerBusyException;
import org.apache.accumulo.core.tabletscan.thrift.TSampleNotPresentException;
import org.apache.accumulo.core.tabletscan.thrift.TabletScanClientService;
import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.ByteBufferUtil;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.accumulo.core.util.Retry;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value>> {
private static final Logger log = LoggerFactory.getLogger(TabletServerBatchReaderIterator.class);
private final ClientContext context;
private final TableId tableId;
private final String tableName;
private Authorizations authorizations = Authorizations.EMPTY;
private final int numThreads;
private final ExecutorService queryThreadPool;
private final ScannerOptions options;
private ArrayBlockingQueue<List<Entry<Key,Value>>> resultsQueue;
private Iterator<Entry<Key,Value>> batchIterator;
private List<Entry<Key,Value>> batch;
private static final List<Entry<Key,Value>> LAST_BATCH = new ArrayList<>();
private final Object nextLock = new Object();
private long failSleepTime = 100;
private volatile Throwable fatalException = null;
private Map<String,TimeoutTracker> timeoutTrackers;
private Set<String> timedoutServers;
private final long retryTimeout;
private TabletLocator locator;
private ScanServerAttemptsImpl scanAttempts = new ScanServerAttemptsImpl();
  /** Callback invoked with each batch of key/value results as they arrive from tablet servers. */
  public interface ResultReceiver {
    void receive(List<Entry<Key,Value>> entries);
  }
  /**
   * Builds the iterator and immediately kicks off the parallel lookups; result batches arrive
   * asynchronously on {@code resultsQueue} and are consumed by {@link #hasNext()}/{@code next()}.
   *
   * @throws IllegalStateException if the initial range binning / lookup fails
   */
  public TabletServerBatchReaderIterator(ClientContext context, TableId tableId, String tableName,
      Authorizations authorizations, ArrayList<Range> ranges, int numThreads,
      ExecutorService queryThreadPool, ScannerOptions scannerOptions, long retryTimeout) {
    this.context = context;
    this.tableId = tableId;
    this.tableName = tableName;
    this.authorizations = authorizations;
    this.numThreads = numThreads;
    this.queryThreadPool = queryThreadPool;
    // defensive copy so later changes to the caller's options don't affect this scan
    this.options = new ScannerOptions(scannerOptions);
    resultsQueue = new ArrayBlockingQueue<>(numThreads);
    this.locator = new TimeoutTabletLocator(retryTimeout, context, tableId);
    timeoutTrackers = Collections.synchronizedMap(new HashMap<>());
    timedoutServers = Collections.synchronizedSet(new HashSet<>());
    this.retryTimeout = retryTimeout;
    if (!options.fetchedColumns.isEmpty()) {
      // narrow each range to the fetched column span to reduce data read server-side
      ArrayList<Range> ranges2 = new ArrayList<>(ranges.size());
      for (Range range : ranges) {
        ranges2.add(range.bound(options.fetchedColumns.first(), options.fetchedColumns.last()));
      }
      ranges = ranges2;
    }
    // receiver run by the query threads; blocks when the bounded queue is full
    ResultReceiver rr = entries -> {
      try {
        resultsQueue.put(entries);
      } catch (InterruptedException e) {
        if (TabletServerBatchReaderIterator.this.queryThreadPool.isShutdown()) {
          log.debug("Failed to add Batch Scan result", e);
        } else {
          log.warn("Failed to add Batch Scan result", e);
        }
        fatalException = e;
        throw new IllegalStateException(e);
      }
    };
    try {
      lookup(ranges, rr);
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) {
      throw new IllegalStateException("Failed to create iterator", e);
    }
  }
  /**
   * Returns whether another result is available, blocking until a batch arrives from the query
   * threads. The {@code LAST_BATCH} sentinel on the queue signals completion. Rethrows any fatal
   * exception recorded by the query threads, and fails fast if the backing thread pool was shut
   * down (e.g. the BatchScanner was closed) while results were still being read.
   */
  @Override
  public boolean hasNext() {
    synchronized (nextLock) {
      if (batch == LAST_BATCH) {
        return false;
      }
      if (batch != null && batchIterator.hasNext()) {
        return true;
      }
      // don't have one cached, try to cache one and return success
      try {
        batch = null;
        // poll with a timeout so shutdown and fatal errors are noticed promptly
        while (batch == null && fatalException == null && !queryThreadPool.isShutdown()) {
          batch = resultsQueue.poll(1, SECONDS);
        }
        if (fatalException != null) {
          if (fatalException instanceof RuntimeException) {
            throw (RuntimeException) fatalException;
          } else {
            throw new IllegalStateException(fatalException);
          }
        }
        if (queryThreadPool.isShutdown()) {
          String shortMsg =
              "The BatchScanner was unexpectedly closed while this Iterator was still in use.";
          log.error("{} Ensure that a reference to the BatchScanner is retained"
              + " so that it can be closed when this Iterator is exhausted. Not"
              + " retaining a reference to the BatchScanner guarantees that you are"
              + " leaking threads in your client JVM.", shortMsg);
          throw new IllegalStateException(
              shortMsg + " Ensure proper handling of the BatchScanner.");
        }
        batchIterator = batch.iterator();
        return batch != LAST_BATCH;
      } catch (InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }
  }
@Override
public Entry<Key,Value> next() {
// if there's one waiting, or hasNext() can get one, return it
synchronized (nextLock) {
if (hasNext()) {
return batchIterator.next();
} else {
throw new NoSuchElementException();
}
}
}
  /** Unsupported: batch scan results are read-only. */
  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }
private synchronized void lookup(List<Range> ranges, ResultReceiver receiver)
throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
List<Column> columns = new ArrayList<>(options.fetchedColumns);
ranges = Range.mergeOverlapping(ranges);
Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
binRanges(locator, ranges, binnedRanges);
doLookups(binnedRanges, receiver, columns);
}
/**
 * Bins the given ranges to the tablet servers currently hosting them, retrying with backoff
 * until every range has a location. On success, each binned range is clipped to its tablet's
 * extent so retries after splits/merges only redo the needed portions.
 *
 * @param tabletLocator locator used to resolve tablet locations
 * @param ranges the ranges to bin
 * @param binnedRanges output map of server location to tablet to ranges; cleared on each attempt
 */
private void binRanges(TabletLocator tabletLocator, List<Range> ranges,
    Map<String,Map<KeyExtent,List<Range>>> binnedRanges)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  int lastFailureSize = Integer.MAX_VALUE;
  // backoff between binning attempts, growing from 100ms to a max of 10s
  Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
      .incrementBy(100, MILLISECONDS).maxWait(10, SECONDS).backOffFactor(1.07)
      .logInterval(1, MINUTES).createFactory().createRetry();
  while (true) {
    binnedRanges.clear();
    List<Range> failures = tabletLocator.binRanges(context, ranges, binnedRanges);
    if (failures.isEmpty()) {
      break;
    } else {
      // Table state checks must run whenever failures stop shrinking, not just when every range
      // fails: nothing invalidates tablet locator cache entries for a deleted table, so stale
      // entries could otherwise keep this loop spinning forever.
      if (failures.size() >= lastFailureSize) {
        context.requireNotDeleted(tableId);
        context.requireNotOffline(tableId, tableName);
      }
      lastFailureSize = failures.size();
      if (log.isTraceEnabled()) {
        log.trace("Failed to bin {} ranges, tablet locations were null, retrying in 100ms",
            failures.size());
      }
      try {
        retry.waitForNextAttempt(log, "binRanges retry failures");
      } catch (InterruptedException e) {
        throw new IllegalStateException(e);
      }
    }
  }
  // truncate the ranges to within the tablets... this makes it easier to know what work
  // needs to be redone when failures occur and tablets have merged or split
  Map<String,Map<KeyExtent,List<Range>>> binnedRanges2 = new HashMap<>();
  for (Entry<String,Map<KeyExtent,List<Range>>> entry : binnedRanges.entrySet()) {
    Map<KeyExtent,List<Range>> tabletMap = new HashMap<>();
    binnedRanges2.put(entry.getKey(), tabletMap);
    for (Entry<KeyExtent,List<Range>> tabletRanges : entry.getValue().entrySet()) {
      Range tabletRange = tabletRanges.getKey().toDataRange();
      List<Range> clippedRanges = new ArrayList<>();
      tabletMap.put(tabletRanges.getKey(), clippedRanges);
      for (Range range : tabletRanges.getValue()) {
        clippedRanges.add(tabletRange.clip(range));
      }
    }
  }
  // replace the contents of the caller's map with the clipped version
  binnedRanges.clear();
  binnedRanges.putAll(binnedRanges2);
}
/**
 * Handles tablets that failed during a multiscan: sleeps briefly (the scan server selector's
 * delay when provided, otherwise an exponentially growing sleep capped at 5s), re-bins the
 * failed ranges, and re-issues the lookups.
 */
private void processFailures(Map<KeyExtent,List<Range>> failures, ResultReceiver receiver,
    List<Column> columns, Duration scanServerSelectorDelay)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  if (log.isTraceEnabled()) {
    log.trace("Failed to execute multiscans against {} tablets, retrying...", failures.size());
  }
  try {
    if (scanServerSelectorDelay != null) {
      Thread.sleep(scanServerSelectorDelay.toMillis());
    } else {
      Thread.sleep(failSleepTime);
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    // We were interrupted (close called on batchscanner) just exit
    log.debug("Exiting failure processing on interrupt");
    return;
  }
  // back off further on each successive round of failures, capped at 5 seconds
  failSleepTime = Math.min(5000, failSleepTime * 2);
  Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
  List<Range> allRanges = new ArrayList<>();
  for (List<Range> ranges : failures.values()) {
    allRanges.addAll(ranges);
  }
  // the ranges were already clipped to tablet boundaries by the first call to binRanges, so
  // re-binning here should only produce entries for the failed tablets
  binRanges(locator, allRanges, binnedRanges);
  doLookups(binnedRanges, receiver, columns);
}
/** Returns a printable description of the table being scanned, for use in error reporting. */
private String getTableInfo() {
  return context.getPrintableTableInfoFromId(tableId);
}
/**
 * Runnable that executes a multiscan against a single server for a set of tablets/ranges. All
 * tasks of one batch share a {@link Semaphore}: each task releases one permit when done, and the
 * task that can then acquire all {@code semaphoreSize} permits (i.e. the last to finish) is
 * responsible for either retrying accumulated failures or marking the batch complete by queueing
 * {@code LAST_BATCH}.
 */
private class QueryTask implements Runnable {
  // server (tablet server or scan server) to query
  private String tsLocation;
  private Map<KeyExtent,List<Range>> tabletsRanges;
  private ResultReceiver receiver;
  // shared across all tasks of the batch; set via setSemaphore before execution
  private Semaphore semaphore = null;
  // shared failure accumulator for the batch; guarded by synchronized(failures)
  private final Map<KeyExtent,List<Range>> failures;
  private List<Column> columns;
  private int semaphoreSize;
  private final long busyTimeout;
  private final ScanServerAttemptReporter reporter;
  private final Duration scanServerSelectorDelay;
  QueryTask(String tsLocation, Map<KeyExtent,List<Range>> tabletsRanges,
      Map<KeyExtent,List<Range>> failures, ResultReceiver receiver, List<Column> columns,
      long busyTimeout, ScanServerAttemptReporter reporter, Duration scanServerSelectorDelay) {
    this.tsLocation = tsLocation;
    this.tabletsRanges = tabletsRanges;
    this.receiver = receiver;
    this.columns = columns;
    this.failures = failures;
    this.busyTimeout = busyTimeout;
    this.reporter = reporter;
    this.scanServerSelectorDelay = scanServerSelectorDelay;
  }
  // must be called before execution; semaphoreSize is the total task count in the batch
  void setSemaphore(Semaphore semaphore, int semaphoreSize) {
    this.semaphore = semaphore;
    this.semaphoreSize = semaphoreSize;
  }
  @Override
  public void run() {
    String threadName = Thread.currentThread().getName();
    Thread.currentThread()
        .setName(threadName + " looking up " + tabletsRanges.size() + " ranges at " + tsLocation);
    log.debug("looking up {} ranges at {}", tabletsRanges.size(), tsLocation);
    Map<KeyExtent,List<Range>> unscanned = new HashMap<>();
    Map<KeyExtent,List<Range>> tsFailures = new HashMap<>();
    try {
      // reuse a per-server timeout tracker so inactivity accumulates across tasks
      TimeoutTracker timeoutTracker = timeoutTrackers.get(tsLocation);
      if (timeoutTracker == null) {
        timeoutTracker = new TimeoutTracker(tsLocation, timedoutServers, retryTimeout);
        timeoutTrackers.put(tsLocation, timeoutTracker);
      }
      doLookup(context, tsLocation, tabletsRanges, tsFailures, unscanned, receiver, columns,
          options, authorizations, timeoutTracker, busyTimeout);
      if (!tsFailures.isEmpty()) {
        locator.invalidateCache(tsFailures.keySet());
        synchronized (failures) {
          failures.putAll(tsFailures);
        }
      }
    } catch (IOException e) {
      if (!TabletServerBatchReaderIterator.this.queryThreadPool.isShutdown()) {
        // on an I/O error, everything not fully scanned must be retried
        synchronized (failures) {
          failures.putAll(tsFailures);
          failures.putAll(unscanned);
        }
        locator.invalidateCache(context, tsLocation);
      }
      log.debug("IOException thrown", e);
      ScanServerAttempt.Result result = ScanServerAttempt.Result.ERROR;
      if (e.getCause() instanceof ScanServerBusyException) {
        result = ScanServerAttempt.Result.BUSY;
      }
      reporter.report(result);
    } catch (AccumuloSecurityException e) {
      e.setTableInfo(getTableInfo());
      log.debug("AccumuloSecurityException thrown", e);
      // a security exception against a table that no longer exists is really a deletion
      context.clearTableListCache();
      if (context.tableNodeExists(tableId)) {
        fatalException = e;
      } else {
        fatalException = new TableDeletedException(tableId.canonical());
      }
    } catch (SampleNotPresentException e) {
      fatalException = e;
    } catch (Exception t) {
      if (queryThreadPool.isShutdown()) {
        log.debug("Caught exception, but queryThreadPool is shutdown", t);
      } else {
        log.warn("Caught exception, but queryThreadPool is not shutdown", t);
      }
      fatalException = t;
    } catch (Throwable t) {
      fatalException = t;
      throw t; // let uncaught exception handler deal with the Error
    } finally {
      semaphore.release();
      Thread.currentThread().setName(threadName);
      if (semaphore.tryAcquire(semaphoreSize)) {
        // this thread released the final permit, so every task of the batch is finished and it
        // must complete the batch
        if (fatalException == null && !failures.isEmpty()) {
          // there were some failures
          try {
            processFailures(failures, receiver, columns, scanServerSelectorDelay);
          } catch (TableNotFoundException | AccumuloException e) {
            log.debug("{}", e.getMessage(), e);
            fatalException = e;
          } catch (AccumuloSecurityException e) {
            e.setTableInfo(getTableInfo());
            log.debug("{}", e.getMessage(), e);
            fatalException = e;
          } catch (Exception t) {
            log.debug("{}", t.getMessage(), t);
            fatalException = t;
          }
          if (fatalException != null) {
            // we are finished with this batch query
            if (!resultsQueue.offer(LAST_BATCH)) {
              log.debug(
                  "Could not add to result queue after seeing fatalException in processFailures",
                  fatalException);
            }
          }
        } else {
          // we are finished with this batch query
          if (fatalException != null) {
            if (!resultsQueue.offer(LAST_BATCH)) {
              log.debug("Could not add to result queue after seeing fatalException",
                  fatalException);
            }
          } else {
            try {
              resultsQueue.put(LAST_BATCH);
            } catch (InterruptedException e) {
              fatalException = e;
              if (!resultsQueue.offer(LAST_BATCH)) {
                log.debug("Could not add to result queue after seeing fatalException",
                    fatalException);
              }
            }
          }
        }
      }
    }
  }
}
/**
 * Creates and submits query tasks for all binned ranges. For eventual-consistency scans the
 * ranges are first re-binned to scan servers. Otherwise, when there are many more threads than
 * servers, each server's tablets are split across several tasks to use the available threads.
 */
private void doLookups(Map<String,Map<KeyExtent,List<Range>>> binnedRanges,
    final ResultReceiver receiver, List<Column> columns) {
  int maxTabletsPerRequest = Integer.MAX_VALUE;
  long busyTimeout = 0;
  Duration scanServerSelectorDelay = null;
  Map<String,ScanServerAttemptReporter> reporters = Map.of();
  if (options.getConsistencyLevel().equals(ConsistencyLevel.EVENTUAL)) {
    var scanServerData = rebinToScanServers(binnedRanges);
    busyTimeout = scanServerData.actions.getBusyTimeout().toMillis();
    reporters = scanServerData.reporters;
    scanServerSelectorDelay = scanServerData.actions.getDelay();
    binnedRanges = scanServerData.binnedRanges;
  } else {
    // when there are lots of threads and a few tablet servers
    // it is good to break request to tablet servers up, the
    // following code determines if this is the case
    if (numThreads / binnedRanges.size() > 1) {
      int totalNumberOfTablets = 0;
      for (Entry<String,Map<KeyExtent,List<Range>>> entry : binnedRanges.entrySet()) {
        totalNumberOfTablets += entry.getValue().size();
      }
      maxTabletsPerRequest = totalNumberOfTablets / numThreads;
      if (maxTabletsPerRequest == 0) {
        maxTabletsPerRequest = 1;
      }
    }
  }
  log.debug("timed out servers: {}", timedoutServers);
  log.debug("binned range servers: {}", binnedRanges.keySet());
  if (timedoutServers.containsAll(binnedRanges.keySet())) {
    // all servers have timed out
    throw new TimedOutException(timedoutServers);
  }
  Map<KeyExtent,List<Range>> failures = new HashMap<>();
  if (!timedoutServers.isEmpty()) {
    // go ahead and fail any timed out servers
    for (Iterator<Entry<String,Map<KeyExtent,List<Range>>>> iterator =
        binnedRanges.entrySet().iterator(); iterator.hasNext();) {
      Entry<String,Map<KeyExtent,List<Range>>> entry = iterator.next();
      if (timedoutServers.contains(entry.getKey())) {
        failures.putAll(entry.getValue());
        iterator.remove();
      }
    }
  }
  // randomize tabletserver order... this will help when there are multiple
  // batch readers and writers running against accumulo
  List<String> locations = new ArrayList<>(binnedRanges.keySet());
  Collections.shuffle(locations);
  List<QueryTask> queryTasks = new ArrayList<>();
  for (final String tsLocation : locations) {
    final Map<KeyExtent,List<Range>> tabletsRanges = binnedRanges.get(tsLocation);
    if (maxTabletsPerRequest == Integer.MAX_VALUE || tabletsRanges.size() == 1) {
      // one task covers this server's entire set of tablets
      QueryTask queryTask = new QueryTask(tsLocation, tabletsRanges, failures, receiver, columns,
          busyTimeout, reporters.getOrDefault(tsLocation, r -> {}), scanServerSelectorDelay);
      queryTasks.add(queryTask);
    } else {
      // split this server's tablets into chunks of at most maxTabletsPerRequest
      HashMap<KeyExtent,List<Range>> tabletSubset = new HashMap<>();
      for (Entry<KeyExtent,List<Range>> entry : tabletsRanges.entrySet()) {
        tabletSubset.put(entry.getKey(), entry.getValue());
        if (tabletSubset.size() >= maxTabletsPerRequest) {
          QueryTask queryTask =
              new QueryTask(tsLocation, tabletSubset, failures, receiver, columns, busyTimeout,
                  reporters.getOrDefault(tsLocation, r -> {}), scanServerSelectorDelay);
          queryTasks.add(queryTask);
          tabletSubset = new HashMap<>();
        }
      }
      if (!tabletSubset.isEmpty()) {
        QueryTask queryTask = new QueryTask(tsLocation, tabletSubset, failures, receiver, columns,
            busyTimeout, reporters.getOrDefault(tsLocation, r -> {}), scanServerSelectorDelay);
        queryTasks.add(queryTask);
      }
    }
  }
  // all permits start out held; each finishing task releases one, and the last task to finish
  // reacquires them all to detect batch completion (see QueryTask.run)
  final Semaphore semaphore = new Semaphore(queryTasks.size());
  semaphore.acquireUninterruptibly(queryTasks.size());
  for (QueryTask queryTask : queryTasks) {
    queryTask.setSemaphore(semaphore, queryTasks.size());
    queryThreadPool.execute(queryTask);
  }
}
/** Holder for the outcome of re-binning ranges to scan servers. */
private static class ScanServerData {
  Map<String,Map<KeyExtent,List<Range>>> binnedRanges; // server -> tablet -> ranges
  ScanServerSelections actions; // selector decisions (delay, busy timeout, server choices)
  Map<String,ScanServerAttemptReporter> reporters; // one reporter per chosen server
}
/**
 * Asks the configured {@link ScanServerSelector} to pick a scan server for each tablet and
 * re-bins the ranges accordingly. Tablets for which the selector returns no scan server fall
 * back to their hosting tablet server.
 */
private ScanServerData rebinToScanServers(Map<String,Map<KeyExtent,List<Range>>> binnedRanges) {
  ScanServerSelector ecsm = context.getScanServerSelector();
  List<TabletIdImpl> tabletIds =
      binnedRanges.values().stream().flatMap(extentMap -> extentMap.keySet().stream())
          .map(TabletIdImpl::new).collect(Collectors.toList());
  // get a snapshot of this once, not each time the plugin requests it
  var scanAttemptsSnapshot = scanAttempts.snapshot();
  ScanServerSelector.SelectorParameters params = new ScanServerSelector.SelectorParameters() {
    @Override
    public Collection<TabletId> getTablets() {
      return Collections.unmodifiableCollection(tabletIds);
    }
    @Override
    public Collection<? extends ScanServerAttempt> getAttempts(TabletId tabletId) {
      return scanAttemptsSnapshot.getOrDefault(tabletId, Set.of());
    }
    @Override
    public Map<String,String> getHints() {
      return options.executionHints;
    }
  };
  var actions = ecsm.selectServers(params);
  // invert binnedRanges into per-extent lookups used while rebinning below
  Map<KeyExtent,String> extentToTserverMap = new HashMap<>();
  Map<KeyExtent,List<Range>> extentToRangesMap = new HashMap<>();
  binnedRanges.forEach((server, extentMap) -> {
    extentMap.forEach((extent, ranges) -> {
      extentToTserverMap.put(extent, server);
      extentToRangesMap.put(extent, ranges);
    });
  });
  Map<String,Map<KeyExtent,List<Range>>> binnedRanges2 = new HashMap<>();
  Map<String,ScanServerAttemptReporter> reporters = new HashMap<>();
  for (TabletIdImpl tabletId : tabletIds) {
    KeyExtent extent = tabletId.toKeyExtent();
    String serverToUse = actions.getScanServer(tabletId);
    if (serverToUse == null) {
      // no scan server was given so use the tablet server
      serverToUse = extentToTserverMap.get(extent);
      log.trace("For tablet {} scan server selector chose tablet_server", tabletId);
    } else {
      log.trace("For tablet {} scan server selector chose scan_server:{}", tabletId, serverToUse);
    }
    var rangeMap = binnedRanges2.computeIfAbsent(serverToUse, k -> new HashMap<>());
    List<Range> ranges = extentToRangesMap.get(extent);
    rangeMap.put(extent, ranges);
    // effectively-final copy of serverToUse for use in the lambda below
    var server = serverToUse;
    reporters.computeIfAbsent(serverToUse, k -> scanAttempts.createReporter(server, tabletId));
  }
  ScanServerData ssd = new ScanServerData();
  ssd.binnedRanges = binnedRanges2;
  ssd.actions = actions;
  ssd.reporters = reporters;
  log.trace("Scan server selector chose delay:{} busyTimeout:{}", actions.getDelay(),
      actions.getBusyTimeout());
  return ssd;
}
/**
 * Updates failure/unscanned bookkeeping from a multiscan result: reported failures are moved
 * from {@code unscanned} to {@code failures}, fully scanned tablets are dropped from
 * {@code unscanned}, and a partially scanned tablet has its ranges trimmed to the portion not
 * yet scanned.
 */
static void trackScanning(Map<KeyExtent,List<Range>> failures,
    Map<KeyExtent,List<Range>> unscanned, MultiScanResult scanResult) {
  // translate returned failures, remove them from unscanned, and add them to failures
  // @formatter:off
  Map<KeyExtent, List<Range>> retFailures = scanResult.failures.entrySet().stream().collect(Collectors.toMap(
    entry -> KeyExtent.fromThrift(entry.getKey()),
    entry -> entry.getValue().stream().map(Range::new).collect(Collectors.toList())
  ));
  // @formatter:on
  unscanned.keySet().removeAll(retFailures.keySet());
  failures.putAll(retFailures);
  // translate full scans and remove them from unscanned
  Set<KeyExtent> fullScans =
      scanResult.fullScans.stream().map(KeyExtent::fromThrift).collect(Collectors.toSet());
  unscanned.keySet().removeAll(fullScans);
  // remove partial scan from unscanned
  if (scanResult.partScan != null) {
    KeyExtent ke = KeyExtent.fromThrift(scanResult.partScan);
    Key nextKey = new Key(scanResult.partNextKey);
    ListIterator<Range> iterator = unscanned.get(ke).listIterator();
    while (iterator.hasNext()) {
      Range range = iterator.next();
      if (range.afterEndKey(nextKey) || (nextKey.equals(range.getEndKey())
          && scanResult.partNextKeyInclusive != range.isEndKeyInclusive())) {
        // this range was fully covered by the partial scan; discard it
        iterator.remove();
      } else if (range.contains(nextKey)) {
        // replace with the unscanned remainder of the range, starting at nextKey
        iterator.remove();
        Range partRange = new Range(nextKey, scanResult.partNextKeyInclusive, range.getEndKey(),
            range.isEndKeyInclusive());
        iterator.add(partRange);
      }
    }
  }
}
/**
 * Tracks how long scans against a single server have gone without progress. Once the configured
 * timeout is exceeded, the server is added to the shared bad-server set so later work avoids it.
 */
private static class TimeoutTracker {

  String server;
  Set<String> badServers;
  long timeOut;
  long activityTime;
  Long firstErrorTime = null;

  TimeoutTracker(String server, Set<String> badServers, long timeOut) {
    this(timeOut);
    this.server = server;
    this.badServers = badServers;
  }

  TimeoutTracker(long timeOut) {
    this.timeOut = timeOut;
  }

  /** Marks the beginning of a scan as activity. */
  void startingScan() {
    activityTime = System.currentTimeMillis();
  }

  /**
   * Fails the server when no progress has been made within the timeout window.
   *
   * @throws IOException if the inactivity window was exceeded
   */
  void check() throws IOException {
    long quietMillis = System.currentTimeMillis() - activityTime;
    if (quietMillis > timeOut) {
      badServers.add(server);
      throw new IOException("Time exceeded " + quietMillis + " " + server);
    }
  }

  /** Records progress: resets the activity clock and clears any pending error window. */
  void madeProgress() {
    firstErrorTime = null;
    activityTime = System.currentTimeMillis();
  }

  /** Records an error; marks the server bad once errors persist longer than the timeout. */
  void errorOccured() {
    if (firstErrorTime != null) {
      if (System.currentTimeMillis() - firstErrorTime > timeOut) {
        badServers.add(server);
      }
    } else {
      firstErrorTime = activityTime;
    }
  }

  public long getTimeOut() {
    return timeOut;
  }
}
/**
 * Convenience overload of the full {@code doLookup} that applies no inactivity timeout and no
 * busy timeout.
 */
public static void doLookup(ClientContext context, String server,
    Map<KeyExtent,List<Range>> requested, Map<KeyExtent,List<Range>> failures,
    Map<KeyExtent,List<Range>> unscanned, ResultReceiver receiver, List<Column> columns,
    ScannerOptions options, Authorizations authorizations)
    throws IOException, AccumuloSecurityException, AccumuloServerException {
  doLookup(context, server, requested, failures, unscanned, receiver, columns, options,
      authorizations, new TimeoutTracker(Long.MAX_VALUE), 0L);
}
/**
 * Executes a multiscan RPC against a single server for the requested tablets/ranges, delivering
 * result batches to the receiver. Progress is recorded in {@code unscanned}/{@code failures}
 * via {@link #trackScanning(Map, Map, MultiScanResult)} so callers know what to retry. Thrift
 * transport and scan-session errors are translated into {@link IOException} so callers treat
 * them as retryable.
 */
static void doLookup(ClientContext context, String server, Map<KeyExtent,List<Range>> requested,
    Map<KeyExtent,List<Range>> failures, Map<KeyExtent,List<Range>> unscanned,
    ResultReceiver receiver, List<Column> columns, ScannerOptions options,
    Authorizations authorizations, TimeoutTracker timeoutTracker, long busyTimeout)
    throws IOException, AccumuloSecurityException, AccumuloServerException {
  if (requested.isEmpty()) {
    return;
  }
  // copy requested to unscanned map. we will remove ranges as they are scanned in trackScanning()
  for (Entry<KeyExtent,List<Range>> entry : requested.entrySet()) {
    ArrayList<Range> ranges = new ArrayList<>();
    for (Range range : entry.getValue()) {
      ranges.add(new Range(range));
    }
    unscanned.put(KeyExtent.copyOf(entry.getKey()), ranges);
  }
  timeoutTracker.startingScan();
  try {
    final HostAndPort parsedServer = HostAndPort.fromString(server);
    final TabletScanClientService.Client client;
    if (timeoutTracker.getTimeOut() < context.getClientTimeoutInMillis()) {
      // use the shorter per-server timeout instead of the client default
      client = ThriftUtil.getClient(ThriftClientTypes.TABLET_SCAN, parsedServer, context,
          timeoutTracker.getTimeOut());
    } else {
      client = ThriftUtil.getClient(ThriftClientTypes.TABLET_SCAN, parsedServer, context);
    }
    try {
      OpTimer timer = null;
      if (log.isTraceEnabled()) {
        log.trace(
            "tid={} Starting multi scan, tserver={} #tablets={} #ranges={} ssil={} ssio={}",
            Thread.currentThread().getId(), server, requested.size(),
            sumSizes(requested.values()), options.serverSideIteratorList,
            options.serverSideIteratorOptions);
        timer = new OpTimer().start();
      }
      TabletType ttype = TabletType.type(requested.keySet());
      // only wait for writes the first time this server is scanned for this tablet type
      boolean waitForWrites = !ThriftScanner.serversWaitedForWrites.get(ttype).contains(server);
      // @formatter:off
      Map<TKeyExtent, List<TRange>> thriftTabletRanges = requested.entrySet().stream().collect(Collectors.toMap(
        entry -> entry.getKey().toThrift(),
        entry -> entry.getValue().stream().map(Range::toThrift).collect(Collectors.toList())
      ));
      // @formatter:on
      Map<String,String> execHints =
          options.executionHints.isEmpty() ? null : options.executionHints;
      InitialMultiScan imsr = client.startMultiScan(TraceUtil.traceInfo(), context.rpcCreds(),
          thriftTabletRanges, columns.stream().map(Column::toThrift).collect(Collectors.toList()),
          options.serverSideIteratorList, options.serverSideIteratorOptions,
          ByteBufferUtil.toByteBuffers(authorizations.getAuthorizations()), waitForWrites,
          SamplerConfigurationImpl.toThrift(options.getSamplerConfiguration()),
          options.batchTimeout, options.classLoaderContext, execHints, busyTimeout);
      if (waitForWrites) {
        ThriftScanner.serversWaitedForWrites.get(ttype).add(server.toString());
      }
      MultiScanResult scanResult = imsr.result;
      if (timer != null) {
        timer.stop();
        log.trace("tid={} Got 1st multi scan results, #results={} {} in {}",
            Thread.currentThread().getId(), scanResult.results.size(),
            (scanResult.more ? "scanID=" + imsr.scanID : ""),
            String.format("%.3f secs", timer.scale(SECONDS)));
      }
      ArrayList<Entry<Key,Value>> entries = new ArrayList<>(scanResult.results.size());
      for (TKeyValue kv : scanResult.results) {
        entries.add(new SimpleImmutableEntry<>(new Key(kv.key), new Value(kv.value)));
      }
      if (!entries.isEmpty()) {
        receiver.receive(entries);
      }
      if (!entries.isEmpty() || !scanResult.fullScans.isEmpty()) {
        timeoutTracker.madeProgress();
      }
      trackScanning(failures, unscanned, scanResult);
      AtomicLong nextOpid = new AtomicLong();
      // keep continuing the scan session until the server reports there is no more data
      while (scanResult.more) {
        timeoutTracker.check();
        if (timer != null) {
          log.trace("tid={} oid={} Continuing multi scan, scanid={}",
              Thread.currentThread().getId(), nextOpid.get(), imsr.scanID);
          timer.reset().start();
        }
        scanResult = client.continueMultiScan(TraceUtil.traceInfo(), imsr.scanID, busyTimeout);
        if (timer != null) {
          timer.stop();
          log.trace("tid={} oid={} Got more multi scan results, #results={} {} in {}",
              Thread.currentThread().getId(), nextOpid.getAndIncrement(),
              scanResult.results.size(), (scanResult.more ? " scanID=" + imsr.scanID : ""),
              String.format("%.3f secs", timer.scale(SECONDS)));
        }
        entries = new ArrayList<>(scanResult.results.size());
        for (TKeyValue kv : scanResult.results) {
          entries.add(new SimpleImmutableEntry<>(new Key(kv.key), new Value(kv.value)));
        }
        if (!entries.isEmpty()) {
          receiver.receive(entries);
        }
        if (!entries.isEmpty() || !scanResult.fullScans.isEmpty()) {
          timeoutTracker.madeProgress();
        }
        trackScanning(failures, unscanned, scanResult);
      }
      client.closeMultiScan(TraceUtil.traceInfo(), imsr.scanID);
    } finally {
      ThriftUtil.returnClient(client, context);
    }
  } catch (TTransportException e) {
    log.debug("Server : {} msg : {}", server, e.getMessage());
    timeoutTracker.errorOccured();
    throw new IOException(e);
  } catch (ThriftSecurityException e) {
    log.debug("Server : {} msg : {}", server, e.getMessage(), e);
    throw new AccumuloSecurityException(e.user, e.code, e);
  } catch (TApplicationException e) {
    log.debug("Server : {} msg : {}", server, e.getMessage(), e);
    throw new AccumuloServerException(server, e);
  } catch (NoSuchScanIDException e) {
    log.debug("Server : {} msg : {}", server, e.getMessage(), e);
    throw new IOException(e);
  } catch (ScanServerBusyException e) {
    log.debug("Server : {} msg : {}", server, e.getMessage(), e);
    throw new IOException(e);
  } catch (TSampleNotPresentException e) {
    log.debug("Server : " + server + " msg : " + e.getMessage(), e);
    String tableInfo = "?";
    if (e.getExtent() != null) {
      TableId tableId = KeyExtent.fromThrift(e.getExtent()).tableId();
      tableInfo = context.getPrintableTableInfoFromId(tableId);
    }
    String message = "Table " + tableInfo + " does not have sampling configured or built";
    throw new SampleNotPresentException(message, e);
  } catch (TException e) {
    log.debug("Server : {} msg : {}", server, e.getMessage(), e);
    timeoutTracker.errorOccured();
    throw new IOException(e);
  }
}
/**
 * Sums the sizes of the given lists.
 *
 * <p>
 * Generalized to accept any collection of lists ({@code Collection<? extends List<?>>}); this is
 * backward-compatible with the previous {@code Collection<List<Range>>} parameter.
 *
 * @param values the lists whose sizes are summed
 * @return the total number of elements across all lists
 */
static int sumSizes(Collection<? extends List<?>> values) {
  int sum = 0;
  for (List<?> list : values) {
    sum += list.size();
  }
  return sum;
}
}
| 9,873 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientConfConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.function.Predicate;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.HadoopCredentialProvider;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.rpc.SaslConnectionParams;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Converts between client {@link Properties} and server-side {@link AccumuloConfiguration}
 * representations of the same settings, translating keys through a fixed bidirectional mapping.
 */
public class ClientConfConverter {

  private static final Logger log = LoggerFactory.getLogger(ClientConfConverter.class);
  // bidirectional key mappings between AccumuloConfiguration keys and client property keys,
  // populated once by the static initializer below
  private static final Map<String,String> accumuloConfToClientProps = new HashMap<>();
  private static final Map<String,String> clientPropsToAccumuloConf = new HashMap<>();

  static {
    // mapping of ClientProperty equivalents in AccumuloConfiguration
    Map<ClientProperty,Property> conversions = new HashMap<>();
    conversions.put(ClientProperty.INSTANCE_ZOOKEEPERS, Property.INSTANCE_ZK_HOST);
    conversions.put(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT, Property.INSTANCE_ZK_TIMEOUT);
    conversions.put(ClientProperty.SASL_ENABLED, Property.INSTANCE_RPC_SASL_ENABLED);
    conversions.put(ClientProperty.SASL_QOP, Property.RPC_SASL_QOP);
    conversions.put(ClientProperty.SSL_ENABLED, Property.INSTANCE_RPC_SSL_ENABLED);
    conversions.put(ClientProperty.SSL_KEYSTORE_PASSWORD, Property.RPC_SSL_KEYSTORE_PASSWORD);
    conversions.put(ClientProperty.SSL_KEYSTORE_PATH, Property.RPC_SSL_KEYSTORE_PATH);
    conversions.put(ClientProperty.SSL_KEYSTORE_TYPE, Property.RPC_SSL_KEYSTORE_TYPE);
    conversions.put(ClientProperty.SSL_TRUSTSTORE_PASSWORD, Property.RPC_SSL_TRUSTSTORE_PASSWORD);
    conversions.put(ClientProperty.SSL_TRUSTSTORE_PATH, Property.RPC_SSL_TRUSTSTORE_PATH);
    conversions.put(ClientProperty.SSL_TRUSTSTORE_TYPE, Property.RPC_SSL_TRUSTSTORE_TYPE);
    conversions.put(ClientProperty.SSL_USE_JSSE, Property.RPC_USE_JSSE);

    for (Map.Entry<ClientProperty,Property> entry : conversions.entrySet()) {
      accumuloConfToClientProps.put(entry.getValue().getKey(), entry.getKey().getKey());
      clientPropsToAccumuloConf.put(entry.getKey().getKey(), entry.getValue().getKey());
    }
  }

  /**
   * Converts an {@link AccumuloConfiguration} to client {@link Properties}, keeping only the
   * settings that have client equivalents and renaming keys accordingly. The kerberos server
   * primary is derived from {@code GENERAL_KERBEROS_PRINCIPAL} when set.
   *
   * @param config the server-side configuration to convert
   * @return equivalent client properties
   */
  public static Properties toProperties(AccumuloConfiguration config) {
    final var propsExtractedFromConfig = new Properties();

    // Extract kerberos primary from the config
    final String serverPrincipal = config.get(Property.GENERAL_KERBEROS_PRINCIPAL);
    if (serverPrincipal != null && !serverPrincipal.isEmpty()) {
      var krbName = new KerberosName(serverPrincipal);
      propsExtractedFromConfig.setProperty(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY.getKey(),
          krbName.getServiceName());
    }

    // Extract the remaining mappable properties from the config
    // (use containsKey rather than keySet().contains for clarity)
    config.stream().filter(e -> accumuloConfToClientProps.containsKey(e.getKey()))
        .forEach(e -> propsExtractedFromConfig.setProperty(e.getKey(), e.getValue()));

    // For all the extracted properties, convert them to their ClientProperty names
    final var convertedProps = new Properties();
    propsExtractedFromConfig.forEach((k, v) -> {
      String confKey = String.valueOf(k);
      String val = String.valueOf(v);
      String propKey = accumuloConfToClientProps.get(confKey);
      convertedProps.setProperty(propKey == null ? confKey : propKey, val);
    });
    return convertedProps;
  }

  /**
   * Converts client {@link Properties} to an {@link AccumuloConfiguration} view. Unmapped keys
   * are passed through unchanged, defaults come from {@link DefaultConfiguration}, and sensitive
   * values may be resolved from a Hadoop CredentialProvider when one is configured via
   * {@code GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS}.
   *
   * @param properties the client properties to convert
   * @return a configuration view backed by the converted properties plus defaults
   */
  public static AccumuloConfiguration toAccumuloConf(Properties properties) {
    final var convertedProps = new Properties();
    for (String propKey : properties.stringPropertyNames()) {
      String val = properties.getProperty(propKey);
      String confKey = clientPropsToAccumuloConf.get(propKey);
      if (propKey.equals(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY.getKey())) {
        confKey = Property.GENERAL_KERBEROS_PRINCIPAL.getKey();
        // Avoid providing a realm since we don't know what it is...
        val += "/_HOST@" + SaslConnectionParams.getDefaultRealm();
      }
      convertedProps.setProperty(confKey == null ? propKey : confKey, val);
      if (propKey.equals(ClientProperty.SSL_KEYSTORE_PATH.getKey())) {
        // a client keystore implies client auth is in use
        convertedProps.setProperty(Property.INSTANCE_RPC_SSL_CLIENT_AUTH.getKey(), "true");
      }
    }
    final AccumuloConfiguration defaults = DefaultConfiguration.getInstance();
    return new AccumuloConfiguration() {
      @Override
      public boolean isPropertySet(Property prop) {
        return convertedProps.containsKey(prop.getKey());
      }

      @Override
      public String get(Property property) {
        final String key = property.getKey();
        // Attempt to load sensitive properties from a CredentialProvider, if configured
        if (property.isSensitive()) {
          org.apache.hadoop.conf.Configuration hadoopConf = getHadoopConfiguration();
          if (hadoopConf != null) {
            char[] value = HadoopCredentialProvider.getValue(hadoopConf, key);
            if (value != null) {
              log.trace("Loaded sensitive value for {} from CredentialProvider", key);
              return new String(value);
            } else {
              log.trace("Tried to load sensitive value for {} from CredentialProvider, "
                  + "but none was found", key);
            }
          }
        }
        return convertedProps.getProperty(key, defaults.get(property));
      }

      @Override
      public void getProperties(Map<String,String> props, Predicate<String> filter) {
        // start with defaults, then overlay converted client properties
        defaults.getProperties(props, filter);
        for (String key : convertedProps.stringPropertyNames()) {
          if (filter.test(key)) {
            props.put(key, convertedProps.getProperty(key));
          }
        }
        // Attempt to load sensitive properties from a CredentialProvider, if configured
        org.apache.hadoop.conf.Configuration hadoopConf = getHadoopConfiguration();
        if (hadoopConf != null) {
          for (String key : HadoopCredentialProvider.getKeys(hadoopConf)) {
            if (!Property.isValidPropertyKey(key) || !Property.isSensitive(key)) {
              continue;
            }
            if (filter.test(key)) {
              char[] value = HadoopCredentialProvider.getValue(hadoopConf, key);
              if (value != null) {
                props.put(key, new String(value));
              }
            }
          }
        }
      }

      // returns a Hadoop configuration pointing at the configured credential providers, or null
      // when none are configured
      private org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
        String credProviderPaths = convertedProps
            .getProperty(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
        if (credProviderPaths != null && !credProviderPaths.isEmpty()) {
          org.apache.hadoop.conf.Configuration hConf = new org.apache.hadoop.conf.Configuration();
          HadoopCredentialProvider.setPath(hConf, credProviderPaths);
          return hConf;
        }
        log.trace("Did not find credential provider configuration in ClientConfiguration");
        return null;
      }
    };
  }
}
| 9,874 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ScannerImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
/**
* provides scanner functionality
*
* "Clients can iterate over multiple column families, and there are several mechanisms for limiting
* the rows, columns, and timestamps traversed by a scan. For example, we could restrict [a] scan
* ... to only produce anchors whose columns match [a] regular expression ..., or to only produce
* anchors whose timestamps fall within ten days of the current time."
*
*/
public class ScannerImpl extends ScannerOptions implements Scanner {
// keep a list of columns over which to scan
// keep track of the last thing read
// hopefully, we can track all the state in the scanner on the client
// and just query for the next highest row from the tablet server
private final ClientContext context; // client connection/state used for scan RPCs
private Authorizations authorizations; // scan authorizations, fixed at construction
private TableId tableId; // table being scanned
private int size; // batch size to request from the server
private Range range; // row range to scan; defaults to all rows
private boolean isolated = false; // whether scan isolation is enabled
private long readaheadThreshold = Constants.SCANNER_DEFAULT_READAHEAD_THRESHOLD;
// once true the scanner rejects further use (see ensureOpen)
boolean closed = false;
// maximum number of iterators tracked in the LRU map below
private static final int MAX_ENTRIES = 16;
// NOTE(review): presumably counts iterators created by this scanner — confirm against iterator()
private long iterCount = 0;
// Create an LRU map of iterators that tracks the MAX_ENTRIES most recently used iterators. An LRU
// map is used to support the use case of a long lived scanner that constantly creates iterators
// and does not read all of the data. For this case do not want iterator tracking to consume too
// much memory. Also it would be best to avoid an RPC storm of close methods for thousands
// sessions that may have timed out.
private Map<ScannerIterator,Long> iters = new LinkedHashMap<>(MAX_ENTRIES + 1, .75F, true) {
  private static final long serialVersionUID = 1L;
  // This method is called just after a new entry has been added; evict the least recently used
  // iterator once the map exceeds MAX_ENTRIES
  @Override
  public boolean removeEldestEntry(Map.Entry<ScannerIterator,Long> eldest) {
    return size() > MAX_ENTRIES;
  }
};
/**
* This is used for ScannerIterators to report their activity back to the scanner that created
* them.
*/
class Reporter {
void readBatch(ScannerIterator iter) {
synchronized (ScannerImpl.this) {
// This iter just had some activity, so access it in map so it becomes the most recently
// used.
iters.get(iter);
}
}
void finished(ScannerIterator iter) {
synchronized (ScannerImpl.this) {
iters.remove(iter);
}
}
}
private synchronized void ensureOpen() {
if (closed) {
throw new IllegalStateException("Scanner is closed");
}
}
public ScannerImpl(ClientContext context, TableId tableId, Authorizations authorizations) {
checkArgument(context != null, "context is null");
checkArgument(tableId != null, "tableId is null");
checkArgument(authorizations != null, "authorizations is null");
this.context = context;
this.tableId = tableId;
this.range = new Range((Key) null, (Key) null);
this.authorizations = authorizations;
this.size = Constants.SCAN_BATCH_SIZE;
}
@Override
public synchronized void setRange(Range range) {
ensureOpen();
checkArgument(range != null, "range is null");
this.range = range;
}
@Override
public synchronized Range getRange() {
ensureOpen();
return range;
}
@Override
public synchronized void setBatchSize(int size) {
ensureOpen();
if (size > 0) {
this.size = size;
} else {
throw new IllegalArgumentException("size must be greater than zero");
}
}
@Override
public synchronized int getBatchSize() {
ensureOpen();
return size;
}
@Override
public synchronized Iterator<Entry<Key,Value>> iterator() {
ensureOpen();
ScannerIterator iter = new ScannerIterator(context, tableId, authorizations, range, size,
getTimeout(SECONDS), this, isolated, readaheadThreshold, new Reporter());
iters.put(iter, iterCount++);
return iter;
}
@Override
public Authorizations getAuthorizations() {
ensureOpen();
return authorizations;
}
@Override
public synchronized void enableIsolation() {
ensureOpen();
this.isolated = true;
}
@Override
public synchronized void disableIsolation() {
ensureOpen();
this.isolated = false;
}
@Override
public synchronized void setReadaheadThreshold(long batches) {
ensureOpen();
if (batches < 0) {
throw new IllegalArgumentException(
"Number of batches before read-ahead must be non-negative");
}
readaheadThreshold = batches;
}
@Override
public synchronized long getReadaheadThreshold() {
ensureOpen();
return readaheadThreshold;
}
@Override
public synchronized void close() {
if (!closed) {
iters.forEach((iter, v) -> iter.close());
iters.clear();
}
closed = true;
}
}
| 9,875 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Suppliers.memoizeWithExpiration;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.LOCATION;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.reflect.InvocationTargetException;
import java.net.URL;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchDeleter;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.ConditionalWriter;
import org.apache.accumulo.core.client.ConditionalWriterConfig;
import org.apache.accumulo.core.client.Durability;
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableDeletedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TableOfflineException;
import org.apache.accumulo.core.client.admin.InstanceOperations;
import org.apache.accumulo.core.client.admin.NamespaceOperations;
import org.apache.accumulo.core.client.admin.SecurityOperations;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.data.KeyValue;
import org.apache.accumulo.core.data.NamespaceId;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.fate.zookeeper.ZooCache.ZcStat;
import org.apache.accumulo.core.fate.zookeeper.ZooCacheFactory;
import org.apache.accumulo.core.fate.zookeeper.ZooReader;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.lock.ServiceLockData;
import org.apache.accumulo.core.lock.ServiceLockData.ThriftService;
import org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.Ample;
import org.apache.accumulo.core.metadata.schema.Ample.ReadConsistency;
import org.apache.accumulo.core.metadata.schema.AmpleImpl;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.Location;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
import org.apache.accumulo.core.rpc.SaslConnectionParams;
import org.apache.accumulo.core.rpc.SslConnectionParams;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.securityImpl.thrift.TCredentials;
import org.apache.accumulo.core.singletons.SingletonManager;
import org.apache.accumulo.core.singletons.SingletonReservation;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.scan.ScanServerInfo;
import org.apache.accumulo.core.spi.scan.ScanServerSelector;
import org.apache.accumulo.core.util.OpTimer;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.core.util.tables.TableZooHelper;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.accumulo.core.util.threads.Threads;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Suppliers;
/**
 * This class represents any essential configuration and credentials needed to initiate RPC
 * operations throughout the code. It is intended to represent a shared object that contains these
 * things from when the client was first constructed. It is not public API, and is only an internal
 * representation of the context in which a client is executing RPCs. If additional parameters are
 * added to the public API that need to be used in the internals of Accumulo, they should be added
 * to this object for later retrieval, rather than as a separate parameter. Any state in this object
 * should be available at the time of its construction.
 */
public class ClientContext implements AccumuloClient {
  private static final Logger log = LoggerFactory.getLogger(ClientContext.class);
  private final ClientInfo info;
  // lazily resolved from zookeeper in getInstanceID()
  private InstanceId instanceId;
  private final ZooReader zooReader;
  private final ZooCache zooCache;
  // credentials and writer configs are created lazily from the client info/properties
  private Credentials creds;
  private BatchWriterConfig batchWriterConfig;
  private ConditionalWriterConfig conditionalWriterConfig;
  private final AccumuloConfiguration serverConf;
  private final Configuration hadoopConf;
  // These fields are very frequently accessed (each time a connection is created) and expensive to
  // compute, so cache them.
  private final Supplier<Long> timeoutSupplier;
  private final Supplier<SaslConnectionParams> saslSupplier;
  private final Supplier<SslConnectionParams> sslSupplier;
  private final Supplier<ScanServerSelector> scanServerSelectorSupplier;
  // serialized thrift credentials, cached by rpcCreds()
  private TCredentials rpcCreds;
  private ThriftTransportPool thriftTransportPool;
  // set by close(); checked by ensureOpen() before every operation
  private volatile boolean closed = false;
  private SecurityOperations secops = null;
  private final TableOperationsImpl tableops;
  private final NamespaceOperations namespaceops;
  private InstanceOperations instanceops = null;
  private final SingletonReservation singletonReservation;
  private final ThreadPools clientThreadPools;
  // lazily created; see executeCleanupTask and submitScannerReadAheadTask
  private ThreadPoolExecutor cleanupThreadPool;
  private ThreadPoolExecutor scannerReadaheadPool;
private void ensureOpen() {
if (closed) {
throw new IllegalStateException("This client was closed.");
}
}
  /**
   * Instantiates and initializes the configured {@link ScanServerSelector} implementation via
   * reflection, handing it the selector-specific client properties and a supplier of the scan
   * servers currently registered in zookeeper.
   *
   * @throws RuntimeException if the configured class cannot be loaded or constructed
   */
  private ScanServerSelector createScanServerSelector() {
    String clazz = ClientProperty.SCAN_SERVER_SELECTOR.getValue(info.getProperties());
    try {
      Class<? extends ScanServerSelector> impl =
          Class.forName(clazz).asSubclass(ScanServerSelector.class);
      ScanServerSelector scanServerSelector = impl.getDeclaredConstructor().newInstance();
      Map<String,String> sserverProps = new HashMap<>();
      // strip the option-prefix from each matching property key before handing it to the selector
      ClientProperty
          .getPrefix(info.getProperties(), ClientProperty.SCAN_SERVER_SELECTOR_OPTS_PREFIX.getKey())
          .forEach((k, v) -> {
            sserverProps.put(
                k.toString()
                    .substring(ClientProperty.SCAN_SERVER_SELECTOR_OPTS_PREFIX.getKey().length()),
                v.toString());
          });
      scanServerSelector.init(new ScanServerSelector.InitParameters() {
        @Override
        public Map<String,String> getOptions() {
          return Collections.unmodifiableMap(sserverProps);
        }
        @Override
        public ServiceEnvironment getServiceEnv() {
          return new ClientServiceEnvironmentImpl(ClientContext.this);
        }
        @Override
        public Supplier<Collection<ScanServerInfo>> getScanServers() {
          // the supplier re-reads getScanServers() on each call, so the selector sees the scan
          // servers that are live at that time
          return () -> ClientContext.this.getScanServers().entrySet().stream()
              .map(entry -> new ScanServerInfo() {
                @Override
                public String getAddress() {
                  return entry.getKey();
                }
                @Override
                public String getGroup() {
                  return entry.getValue().getSecond();
                }
              }).collect(Collectors.toSet());
        }
      });
      return scanServerSelector;
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException
        | IllegalArgumentException | InvocationTargetException | NoSuchMethodException
        | SecurityException e) {
      throw new RuntimeException("Error creating ScanServerSelector implementation: " + clazz, e);
    }
  }
  /**
   * Create a client context with the provided configuration. Legacy client code must provide a
   * no-op SingletonReservation to preserve behavior prior to 2.x. Clients since 2.x should call
   * Accumulo.newClient() builder, which will create a client reservation in
   * {@link ClientBuilderImpl#buildClient}
   */
  public ClientContext(SingletonReservation reservation, ClientInfo info,
      AccumuloConfiguration serverConf, UncaughtExceptionHandler ueh) {
    this.info = info;
    this.hadoopConf = info.getHadoopConf();
    zooReader = new ZooReader(info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
    zooCache =
        new ZooCacheFactory().getZooCache(info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
    this.serverConf = serverConf;
    // memoize frequently used, expensive-to-compute connection parameters; the short 100ms
    // expirations allow configuration/credential changes to be picked up quickly
    timeoutSupplier = memoizeWithExpiration(
        () -> getConfiguration().getTimeInMillis(Property.GENERAL_RPC_TIMEOUT), 100, MILLISECONDS);
    sslSupplier = Suppliers.memoize(() -> SslConnectionParams.forClient(getConfiguration()));
    saslSupplier = memoizeWithExpiration(
        () -> SaslConnectionParams.from(getConfiguration(), getCredentials().getToken()), 100,
        MILLISECONDS);
    scanServerSelectorSupplier =
        memoizeWithExpiration(this::createScanServerSelector, 100, MILLISECONDS);
    this.singletonReservation = Objects.requireNonNull(reservation);
    this.tableops = new TableOperationsImpl(this);
    this.namespaceops = new NamespaceOperationsImpl(this, tableops);
    // NOTE(review): Threads.UEH appears to identify a server-internal caller, which shares the
    // server thread pools instead of creating client pools — confirm
    if (ueh == Threads.UEH) {
      clientThreadPools = ThreadPools.getServerThreadPools();
    } else {
      // Provide a default UEH that just logs the error
      if (ueh == null) {
        clientThreadPools = ThreadPools.getClientThreadPools((t, e) -> {
          log.error("Caught an Exception in client background thread: {}. Thread is dead.", t, e);
        });
      } else {
        clientThreadPools = ThreadPools.getClientThreadPools(ueh);
      }
    }
  }
public Ample getAmple() {
ensureOpen();
return new AmpleImpl(this);
}
  /**
   * Submits a scanner read-ahead task to a lazily created pool shared by all scanners of this
   * client.
   */
  public synchronized Future<List<KeyValue>>
      submitScannerReadAheadTask(Callable<List<KeyValue>> c) {
    ensureOpen();
    if (scannerReadaheadPool == null) {
      // 0 core threads with a SynchronousQueue: threads are created on demand and die after 3s idle
      scannerReadaheadPool = clientThreadPools.createThreadPool(0, Integer.MAX_VALUE, 3L, SECONDS,
          "Accumulo scanner read ahead thread", new SynchronousQueue<>(), true);
    }
    return scannerReadaheadPool.submit(c);
  }
  /**
   * Runs a cleanup task on a lazily created single-threaded pool; used for conditional writer
   * cleanup (see the thread name).
   */
  public synchronized void executeCleanupTask(Runnable r) {
    ensureOpen();
    if (cleanupThreadPool == null) {
      cleanupThreadPool = clientThreadPools.createFixedThreadPool(1, 3, SECONDS,
          "Conditional Writer Cleanup Thread", true);
    }
    this.cleanupThreadPool.execute(r);
  }
  /**
   * Returns the thread pool factory chosen in the constructor.
   *
   * @return ThreadPools instance optionally configured with client UncaughtExceptionHandler
   */
  public ThreadPools threadPools() {
    ensureOpen();
    return clientThreadPools;
  }
  /**
   * Retrieve the credentials used to construct this context
   */
  public synchronized Credentials getCredentials() {
    ensureOpen();
    if (creds == null) {
      // lazily built from the client info on first use
      creds = new Credentials(info.getPrincipal(), info.getAuthenticationToken());
    }
    return creds;
  }
  /** @return the principal these credentials authenticate as */
  public String getPrincipal() {
    ensureOpen();
    return getCredentials().getPrincipal();
  }
  /** @return the authentication token used to construct this context */
  public AuthenticationToken getAuthenticationToken() {
    ensureOpen();
    return getCredentials().getToken();
  }
  /** @return the raw client properties this context was created from */
  public Properties getProperties() {
    ensureOpen();
    return info.getProperties();
  }
  /**
   * Update the credentials in the current context after changing the current user's password or
   * other auth token
   */
  public synchronized void setCredentials(Credentials newCredentials) {
    ensureOpen();
    checkArgument(newCredentials != null, "newCredentials is null");
    creds = newCredentials;
    // invalidate the cached thrift credentials so they are re-serialized from the new token
    rpcCreds = null;
  }
  /**
   * Retrieve the configuration used to construct this context
   */
  public AccumuloConfiguration getConfiguration() {
    ensureOpen();
    return serverConf;
  }
  /**
   * Retrieve the hadoop configuration
   */
  public Configuration getHadoopConf() {
    ensureOpen();
    return this.hadoopConf;
  }
  /**
   * Retrieve the universal RPC client timeout from the configuration
   */
  public long getClientTimeoutInMillis() {
    ensureOpen();
    // memoized with a 100ms expiration; see the constructor
    return timeoutSupplier.get();
  }
  /**
   * Retrieve SSL/TLS configuration to initiate an RPC connection to a server
   */
  public SslConnectionParams getClientSslParams() {
    ensureOpen();
    return sslSupplier.get();
  }
  /**
   * Retrieve SASL configuration to initiate an RPC connection to a server
   */
  public SaslConnectionParams getSaslParams() {
    ensureOpen();
    return saslSupplier.get();
  }
static BatchWriterConfig getBatchWriterConfig(Properties props) {
BatchWriterConfig batchWriterConfig = new BatchWriterConfig();
Long maxMemory = ClientProperty.BATCH_WRITER_MEMORY_MAX.getBytes(props);
if (maxMemory != null) {
batchWriterConfig.setMaxMemory(maxMemory);
}
Long maxLatency = ClientProperty.BATCH_WRITER_LATENCY_MAX.getTimeInMillis(props);
if (maxLatency != null) {
batchWriterConfig.setMaxLatency(maxLatency, MILLISECONDS);
}
Long timeout = ClientProperty.BATCH_WRITER_TIMEOUT_MAX.getTimeInMillis(props);
if (timeout != null) {
batchWriterConfig.setTimeout(timeout, MILLISECONDS);
}
Integer maxThreads = ClientProperty.BATCH_WRITER_THREADS_MAX.getInteger(props);
if (maxThreads != null) {
batchWriterConfig.setMaxWriteThreads(maxThreads);
}
String durability = ClientProperty.BATCH_WRITER_DURABILITY.getValue(props);
if (!durability.isEmpty()) {
batchWriterConfig.setDurability(Durability.valueOf(durability.toUpperCase()));
}
return batchWriterConfig;
}
  /** @return batch writer defaults derived (once, lazily) from the client properties */
  public synchronized BatchWriterConfig getBatchWriterConfig() {
    ensureOpen();
    if (batchWriterConfig == null) {
      batchWriterConfig = getBatchWriterConfig(info.getProperties());
    }
    return batchWriterConfig;
  }
  /**
   * @return map of live scan server addresses to lock uuids.
   */
  public Map<String,Pair<UUID,String>> getScanServers() {
    Map<String,Pair<UUID,String>> liveScanServers = new HashMap<>();
    String root = this.getZooKeeperRoot() + Constants.ZSSERVERS;
    var addrs = this.getZooCache().getChildren(root);
    for (String addr : addrs) {
      try {
        final var zLockPath = ServiceLock.path(root + "/" + addr);
        ZcStat stat = new ZcStat();
        // a scan server is considered live only when lock data is present on its node
        Optional<ServiceLockData> sld = ServiceLock.getLockData(getZooCache(), zLockPath, stat);
        if (sld.isPresent()) {
          UUID uuid = sld.orElseThrow().getServerUUID(ThriftService.TABLET_SCAN);
          String group = sld.orElseThrow().getGroup(ThriftService.TABLET_SCAN);
          liveScanServers.put(addr, new Pair<>(uuid, group));
        }
      } catch (IllegalArgumentException e) {
        // skip malformed nodes so one bad entry cannot hide the remaining scan servers
        log.error("Error validating zookeeper scan server node: " + addr, e);
      }
    }
    return liveScanServers;
  }
  /**
   * @return the scan server selector implementation used for determining which scan servers will be
   *         used when performing an eventually consistent scan
   */
  public ScanServerSelector getScanServerSelector() {
    ensureOpen();
    // memoized with a 100ms expiration; see the constructor
    return scanServerSelectorSupplier.get();
  }
static ConditionalWriterConfig getConditionalWriterConfig(Properties props) {
ConditionalWriterConfig conditionalWriterConfig = new ConditionalWriterConfig();
Long timeout = ClientProperty.CONDITIONAL_WRITER_TIMEOUT_MAX.getTimeInMillis(props);
if (timeout != null) {
conditionalWriterConfig.setTimeout(timeout, MILLISECONDS);
}
String durability = ClientProperty.CONDITIONAL_WRITER_DURABILITY.getValue(props);
if (!durability.isEmpty()) {
conditionalWriterConfig.setDurability(Durability.valueOf(durability.toUpperCase()));
}
Integer maxThreads = ClientProperty.CONDITIONAL_WRITER_THREADS_MAX.getInteger(props);
if (maxThreads != null) {
conditionalWriterConfig.setMaxWriteThreads(maxThreads);
}
return conditionalWriterConfig;
}
  /** @return conditional writer defaults derived (once, lazily) from the client properties */
  public synchronized ConditionalWriterConfig getConditionalWriterConfig() {
    ensureOpen();
    if (conditionalWriterConfig == null) {
      conditionalWriterConfig = getConditionalWriterConfig(info.getProperties());
    }
    return conditionalWriterConfig;
  }
  /**
   * Serialize the credentials just before initiating the RPC call
   */
  public synchronized TCredentials rpcCreds() {
    ensureOpen();
    if (getCredentials().getToken().isDestroyed()) {
      // a destroyed token invalidates any previously serialized credentials
      rpcCreds = null;
    }
    if (rpcCreds == null) {
      rpcCreds = getCredentials().toThrift(getInstanceID());
    }
    return rpcCreds;
  }
  /**
   * Returns the location of the tablet server that is serving the root tablet.
   *
   * @return location in "hostname:port" form, or null when the root tablet has no CURRENT
   *         location
   */
  public String getRootTabletLocation() {
    ensureOpen();
    OpTimer timer = null;
    if (log.isTraceEnabled()) {
      log.trace("tid={} Looking up root tablet location in zookeeper.",
          Thread.currentThread().getId());
      timer = new OpTimer().start();
    }
    // NOTE(review): an EVENTUAL read is used here, so a slightly stale location may be returned;
    // presumably callers tolerate/retry stale locations — confirm
    Location loc =
        getAmple().readTablet(RootTable.EXTENT, ReadConsistency.EVENTUAL, LOCATION).getLocation();
    if (timer != null) {
      timer.stop();
      log.trace("tid={} Found root tablet at {} in {}", Thread.currentThread().getId(), loc,
          String.format("%.3f secs", timer.scale(SECONDS)));
    }
    if (loc == null || loc.getType() != LocationType.CURRENT) {
      return null;
    }
    return loc.getHostPort();
  }
  /**
   * Returns the location(s) of the accumulo manager and any redundant servers.
   *
   * @return a list of locations in "hostname:port" form
   */
  public List<String> getManagerLocations() {
    ensureOpen();
    var zLockManagerPath =
        ServiceLock.path(Constants.ZROOT + "/" + getInstanceID() + Constants.ZMANAGER_LOCK);
    OpTimer timer = null;
    if (log.isTraceEnabled()) {
      log.trace("tid={} Looking up manager location in zookeeper at {}.",
          Thread.currentThread().getId(), zLockManagerPath);
      timer = new OpTimer().start();
    }
    Optional<ServiceLockData> sld = zooCache.getLockData(zLockManagerPath);
    String location = null;
    if (sld.isPresent()) {
      location = sld.orElseThrow().getAddressString(ThriftService.MANAGER);
    }
    if (timer != null) {
      timer.stop();
      log.trace("tid={} Found manager at {} in {}", Thread.currentThread().getId(),
          (location == null ? "null" : location), String.format("%.3f secs", timer.scale(SECONDS)));
    }
    if (location == null) {
      // no manager lock is currently held
      return Collections.emptyList();
    }
    return Collections.singletonList(location);
  }
  /**
   * Returns a unique string that identifies this instance of accumulo.
   *
   * @return a UUID
   * @throws RuntimeException if the instance name or the id it points to does not exist in
   *         zookeeper
   */
  public InstanceId getInstanceID() {
    ensureOpen();
    // NOTE(review): this caching is unsynchronized; concurrent first calls may each perform the
    // lookup, which is benign as long as the resolved id is identical — confirm
    if (instanceId == null) {
      // lookup by name
      final String instanceName = info.getInstanceName();
      String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + instanceName;
      byte[] data = zooCache.get(instanceNamePath);
      if (data == null) {
        throw new RuntimeException(
            "Instance name " + instanceName + " does not exist in zookeeper. "
                + "Run \"accumulo org.apache.accumulo.server.util.ListInstances\" to see a list.");
      }
      String instanceIdString = new String(data, UTF_8);
      // verify that the instanceId found via the instanceName actually exists as an instance
      if (zooCache.get(Constants.ZROOT + "/" + instanceIdString) == null) {
        throw new RuntimeException("Instance id " + instanceIdString
            + (instanceName == null ? "" : " pointed to by the name " + instanceName)
            + " does not exist in zookeeper");
      }
      instanceId = InstanceId.of(instanceIdString);
    }
    return instanceId;
  }
  /** @return the zookeeper root path for this instance, derived from the instance id */
  public String getZooKeeperRoot() {
    ensureOpen();
    return ZooUtil.getRoot(getInstanceID());
  }
  /**
   * Returns the instance name given at system initialization time.
   *
   * @return current instance name
   */
  public String getInstanceName() {
    ensureOpen();
    return info.getInstanceName();
  }
  /**
   * Returns a comma-separated list of zookeeper servers the instance is using.
   *
   * @return the zookeeper servers this instance is using in "hostname:port" form
   */
  public String getZooKeepers() {
    ensureOpen();
    return info.getZooKeepers();
  }
  /**
   * Returns the zookeeper connection timeout.
   *
   * @return the configured timeout to connect to zookeeper
   */
  public int getZooKeepersSessionTimeOut() {
    ensureOpen();
    return info.getZooKeepersSessionTimeOut();
  }
  /** @return the shared zookeeper cache created for this context */
  public ZooCache getZooCache() {
    ensureOpen();
    return zooCache;
  }
  // lazily created helper for table name/id/state resolution via zookeeper
  private TableZooHelper tableZooHelper;
  private synchronized TableZooHelper tableZooHelper() {
    ensureOpen();
    if (tableZooHelper == null) {
      tableZooHelper = new TableZooHelper(this);
    }
    return tableZooHelper;
  }
  // The methods below delegate table name/id/state resolution to the lazily created
  // TableZooHelper.
  public TableId getTableId(String tableName) throws TableNotFoundException {
    return tableZooHelper().getTableId(tableName);
  }
  public TableId _getTableIdDetectNamespaceNotFound(String tableName)
      throws NamespaceNotFoundException, TableNotFoundException {
    return tableZooHelper()._getTableIdDetectNamespaceNotFound(tableName);
  }
  public String getTableName(TableId tableId) throws TableNotFoundException {
    return tableZooHelper().getTableName(tableId);
  }
  public Map<String,TableId> getTableNameToIdMap() {
    return tableZooHelper().getTableMap().getNameToIdMap();
  }
  public Map<TableId,String> getTableIdToNameMap() {
    return tableZooHelper().getTableMap().getIdtoNameMap();
  }
  public boolean tableNodeExists(TableId tableId) {
    return tableZooHelper().tableNodeExists(tableId);
  }
  public void clearTableListCache() {
    tableZooHelper().clearTableListCache();
  }
  public String getPrintableTableInfoFromId(TableId tableId) {
    return tableZooHelper().getPrintableTableInfoFromId(tableId);
  }
  public String getPrintableTableInfoFromName(String tableName) {
    return tableZooHelper().getPrintableTableInfoFromName(tableName);
  }
  public TableState getTableState(TableId tableId) {
    // does not clear the cached state; see the two-argument overload
    return tableZooHelper().getTableState(tableId, false);
  }
  public TableState getTableState(TableId tableId, boolean clearCachedState) {
    return tableZooHelper().getTableState(tableId, clearCachedState);
  }
  public NamespaceId getNamespaceId(TableId tableId) throws TableNotFoundException {
    return tableZooHelper().getNamespaceId(tableId);
  }
  // use cases overlap with requireNotDeleted, but this throws a checked exception
  public TableId requireTableExists(TableId tableId, String tableName)
      throws TableNotFoundException {
    if (!tableNodeExists(tableId)) {
      throw new TableNotFoundException(tableId.canonical(), tableName, "Table no longer exists");
    }
    return tableId;
  }
  // use cases overlap with requireTableExists, but this throws a runtime exception
  public TableId requireNotDeleted(TableId tableId) {
    if (!tableNodeExists(tableId)) {
      throw new TableDeletedException(tableId.canonical());
    }
    return tableId;
  }
  /** @return tableId unchanged, after verifying the table is not in the OFFLINE state */
  public TableId requireNotOffline(TableId tableId, String tableName) {
    if (getTableState(tableId) == TableState.OFFLINE) {
      throw new TableOfflineException(tableId, tableName);
    }
    return tableId;
  }
  @Override
  public BatchScanner createBatchScanner(String tableName, Authorizations authorizations,
      int numQueryThreads) throws TableNotFoundException {
    ensureOpen();
    checkArgument(authorizations != null, "authorizations is null");
    return new TabletServerBatchReader(this, requireNotOffline(getTableId(tableName), tableName),
        tableName, authorizations, numQueryThreads);
  }
  @Override
  public BatchScanner createBatchScanner(String tableName, Authorizations authorizations)
      throws TableNotFoundException {
    ensureOpen();
    // thread count comes from the client properties; presumably the property has a default,
    // hence the non-null check — confirm
    Integer numQueryThreads =
        ClientProperty.BATCH_SCANNER_NUM_QUERY_THREADS.getInteger(getProperties());
    Objects.requireNonNull(numQueryThreads);
    return createBatchScanner(tableName, authorizations, numQueryThreads);
  }
  @Override
  public BatchScanner createBatchScanner(String tableName)
      throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
    // scan with all of the current user's authorizations
    Authorizations auths = securityOperations().getUserAuthorizations(getPrincipal());
    return createBatchScanner(tableName, auths);
  }
  @Override
  public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
      int numQueryThreads, BatchWriterConfig config) throws TableNotFoundException {
    ensureOpen();
    checkArgument(authorizations != null, "authorizations is null");
    // the caller-provided config is merged with the defaults from the client properties
    return new TabletServerBatchDeleter(this, requireNotOffline(getTableId(tableName), tableName),
        tableName, authorizations, numQueryThreads, config.merge(getBatchWriterConfig()));
  }
  @Override
  public BatchDeleter createBatchDeleter(String tableName, Authorizations authorizations,
      int numQueryThreads) throws TableNotFoundException {
    ensureOpen();
    return createBatchDeleter(tableName, authorizations, numQueryThreads, new BatchWriterConfig());
  }
  @Override
  public BatchWriter createBatchWriter(String tableName, BatchWriterConfig config)
      throws TableNotFoundException {
    ensureOpen();
    // we used to allow null inputs for bw config
    if (config == null) {
      config = new BatchWriterConfig();
    }
    // the caller-provided config is merged with the defaults from the client properties
    return new BatchWriterImpl(this, requireNotOffline(getTableId(tableName), tableName),
        config.merge(getBatchWriterConfig()));
  }
  @Override
  public BatchWriter createBatchWriter(String tableName) throws TableNotFoundException {
    return createBatchWriter(tableName, new BatchWriterConfig());
  }
  @Override
  public MultiTableBatchWriter createMultiTableBatchWriter(BatchWriterConfig config) {
    ensureOpen();
    // the caller-provided config is merged with the defaults from the client properties
    return new MultiTableBatchWriterImpl(this, config.merge(getBatchWriterConfig()));
  }
  @Override
  public MultiTableBatchWriter createMultiTableBatchWriter() {
    return createMultiTableBatchWriter(new BatchWriterConfig());
  }
@Override
public ConditionalWriter createConditionalWriter(String tableName, ConditionalWriterConfig config)
throws TableNotFoundException {
ensureOpen();
if (config == null) {
config = new ConditionalWriterConfig();
}
return new ConditionalWriterImpl(this, requireNotOffline(getTableId(tableName), tableName),
tableName, config.merge(getConditionalWriterConfig()));
}
@Override
public ConditionalWriter createConditionalWriter(String tableName) throws TableNotFoundException {
ensureOpen();
return new ConditionalWriterImpl(this, requireNotOffline(getTableId(tableName), tableName),
tableName, new ConditionalWriterConfig());
}
  @Override
  public Scanner createScanner(String tableName, Authorizations authorizations)
      throws TableNotFoundException {
    ensureOpen();
    checkArgument(authorizations != null, "authorizations is null");
    Scanner scanner =
        new ScannerImpl(this, requireNotOffline(getTableId(tableName), tableName), authorizations);
    // apply the batch size from client properties, when one was configured
    Integer batchSize = ClientProperty.SCANNER_BATCH_SIZE.getInteger(getProperties());
    if (batchSize != null) {
      scanner.setBatchSize(batchSize);
    }
    return scanner;
  }
  @Override
  public Scanner createScanner(String tableName)
      throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
    // scan with all of the current user's authorizations
    Authorizations auths = securityOperations().getUserAuthorizations(getPrincipal());
    return createScanner(tableName, auths);
  }
  @Override
  public String whoami() {
    ensureOpen();
    return getCredentials().getPrincipal();
  }
  @Override
  public synchronized TableOperations tableOperations() {
    ensureOpen();
    return tableops;
  }
  @Override
  public synchronized NamespaceOperations namespaceOperations() {
    ensureOpen();
    return namespaceops;
  }
  @Override
  public synchronized SecurityOperations securityOperations() {
    ensureOpen();
    // lazily created, unlike table/namespace operations which are built in the constructor
    if (secops == null) {
      secops = new SecurityOperationsImpl(this);
    }
    return secops;
  }
  @Override
  public synchronized InstanceOperations instanceOperations() {
    ensureOpen();
    if (instanceops == null) {
      instanceops = new InstanceOperationsImpl(this);
    }
    return instanceops;
  }
  @Override
  public Properties properties() {
    ensureOpen();
    Properties result = new Properties();
    // copy all properties except the auth token, which is deliberately withheld here
    getProperties().forEach((key, value) -> {
      if (!key.equals(ClientProperty.AUTH_TOKEN.getKey())) {
        result.setProperty((String) key, (String) value);
      }
    });
    return result;
  }
  /** @return the authentication token; unlike {@link #properties()}, this does expose the token */
  public AuthenticationToken token() {
    ensureOpen();
    return getAuthenticationToken();
  }
  @Override
  public synchronized void close() {
    // after this, every public operation will fail in ensureOpen(); close is idempotent since
    // each shutdown below is guarded by a null check
    closed = true;
    if (thriftTransportPool != null) {
      thriftTransportPool.shutdown();
    }
    if (tableZooHelper != null) {
      tableZooHelper.close();
    }
    if (scannerReadaheadPool != null) {
      scannerReadaheadPool.shutdownNow(); // abort all tasks, client is shutting down
    }
    if (cleanupThreadPool != null) {
      cleanupThreadPool.shutdown(); // wait for shutdown tasks to execute
    }
    singletonReservation.close();
  }
  /**
   * Fluent builder implementation behind the client construction API. Each chained option
   * method records its value into an internal {@link Properties} object; {@link #build()}
   * applies the supplied builder function to this populated builder to produce the final
   * product (an {@code AccumuloClient} via {@link #buildClient} or a {@code Properties}
   * via {@link #buildProps}).
   */
  public static class ClientBuilderImpl<T>
      implements InstanceArgs<T>, PropertyOptions<T>, AuthenticationArgs<T>, ConnectionOptions<T>,
      SslOptions<T>, SaslOptions<T>, ClientFactory<T>, FromOptions<T> {
    // Accumulated client configuration; replaced wholesale by from(Properties).
    private Properties properties = new Properties();
    // Set only by as(principal, token); when present, getClientInfo() validates with a
    // different mode (false) — presumably skipping credential property checks; confirm in
    // ClientProperty.validate.
    private AuthenticationToken token = null;
    // Converts this populated builder into the build product.
    private final Function<ClientBuilderImpl<T>,T> builderFunction;
    // Optional uncaught-exception handler passed through to the built client.
    private UncaughtExceptionHandler ueh = null;
    public ClientBuilderImpl(Function<ClientBuilderImpl<T>,T> builderFunction) {
      this.builderFunction = builderFunction;
    }
    // Builds the ClientInfo from collected properties, preferring an explicit token object
    // over token-related properties when one was supplied.
    private ClientInfo getClientInfo() {
      if (token != null) {
        ClientProperty.validate(properties, false);
        return new ClientInfoImpl(properties, token);
      }
      ClientProperty.validate(properties);
      return new ClientInfoImpl(properties);
    }
    private UncaughtExceptionHandler getUncaughtExceptionHandler() {
      return ueh;
    }
    @Override
    public T build() {
      return builderFunction.apply(this);
    }
    // Terminal operation for the AccumuloClient flavor of this builder: reserves the shared
    // singletons and constructs a ClientContext around the collected configuration.
    public static AccumuloClient buildClient(ClientBuilderImpl<AccumuloClient> cbi) {
      SingletonReservation reservation = SingletonManager.getClientReservation();
      try {
        // ClientContext closes reservation unless a RuntimeException is thrown
        ClientInfo info = cbi.getClientInfo();
        AccumuloConfiguration config = ClientConfConverter.toAccumuloConf(info.getProperties());
        return new ClientContext(reservation, info, config, cbi.getUncaughtExceptionHandler());
      } catch (RuntimeException e) {
        reservation.close();
        throw e;
      }
    }
    // Terminal operation for the Properties flavor of this builder.
    public static Properties buildProps(ClientBuilderImpl<Properties> cbi) {
      ClientProperty.validate(cbi.properties);
      return cbi.properties;
    }
    @Override
    public AuthenticationArgs<T> to(CharSequence instanceName, CharSequence zookeepers) {
      setProperty(ClientProperty.INSTANCE_NAME, instanceName);
      setProperty(ClientProperty.INSTANCE_ZOOKEEPERS, zookeepers);
      return this;
    }
    @Override
    public SslOptions<T> truststore(CharSequence path) {
      setProperty(ClientProperty.SSL_TRUSTSTORE_PATH, path);
      return this;
    }
    @Override
    public SslOptions<T> truststore(CharSequence path, CharSequence password, CharSequence type) {
      setProperty(ClientProperty.SSL_TRUSTSTORE_PATH, path);
      setProperty(ClientProperty.SSL_TRUSTSTORE_PASSWORD, password);
      setProperty(ClientProperty.SSL_TRUSTSTORE_TYPE, type);
      return this;
    }
    @Override
    public SslOptions<T> keystore(CharSequence path) {
      setProperty(ClientProperty.SSL_KEYSTORE_PATH, path);
      return this;
    }
    @Override
    public SslOptions<T> keystore(CharSequence path, CharSequence password, CharSequence type) {
      setProperty(ClientProperty.SSL_KEYSTORE_PATH, path);
      setProperty(ClientProperty.SSL_KEYSTORE_PASSWORD, password);
      setProperty(ClientProperty.SSL_KEYSTORE_TYPE, type);
      return this;
    }
    @Override
    public SslOptions<T> useJsse() {
      setProperty(ClientProperty.SSL_USE_JSSE, "true");
      return this;
    }
    @Override
    public ConnectionOptions<T> zkTimeout(int timeout) {
      // Stored as milliseconds; the int is widened to long for the time-valued property.
      ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.setTimeInMillis(properties, (long) timeout);
      return this;
    }
    @Override
    public SslOptions<T> useSsl() {
      setProperty(ClientProperty.SSL_ENABLED, "true");
      return this;
    }
    @Override
    public SaslOptions<T> useSasl() {
      setProperty(ClientProperty.SASL_ENABLED, "true");
      return this;
    }
    // Translates a legacy BatchWriterConfig into the equivalent client properties.
    @Override
    public ConnectionOptions<T> batchWriterConfig(BatchWriterConfig batchWriterConfig) {
      ClientProperty.BATCH_WRITER_MEMORY_MAX.setBytes(properties, batchWriterConfig.getMaxMemory());
      ClientProperty.BATCH_WRITER_LATENCY_MAX.setTimeInMillis(properties,
          batchWriterConfig.getMaxLatency(MILLISECONDS));
      ClientProperty.BATCH_WRITER_TIMEOUT_MAX.setTimeInMillis(properties,
          batchWriterConfig.getTimeout(MILLISECONDS));
      setProperty(ClientProperty.BATCH_WRITER_THREADS_MAX, batchWriterConfig.getMaxWriteThreads());
      setProperty(ClientProperty.BATCH_WRITER_DURABILITY,
          batchWriterConfig.getDurability().toString());
      return this;
    }
    @Override
    public ConnectionOptions<T> batchScannerQueryThreads(int numQueryThreads) {
      setProperty(ClientProperty.BATCH_SCANNER_NUM_QUERY_THREADS, numQueryThreads);
      return this;
    }
    @Override
    public ConnectionOptions<T> scannerBatchSize(int batchSize) {
      setProperty(ClientProperty.SCANNER_BATCH_SIZE, batchSize);
      return this;
    }
    @Override
    public SaslOptions<T> primary(CharSequence kerberosServerPrimary) {
      setProperty(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY, kerberosServerPrimary);
      return this;
    }
    @Override
    public SaslOptions<T> qop(CharSequence qualityOfProtection) {
      setProperty(ClientProperty.SASL_QOP, qualityOfProtection);
      return this;
    }
    @Override
    public FromOptions<T> from(String propertiesFilePath) {
      return from(ClientInfoImpl.toProperties(propertiesFilePath));
    }
    @Override
    public FromOptions<T> from(Path propertiesFile) {
      return from(ClientInfoImpl.toProperties(propertiesFile));
    }
    @Override
    public FromOptions<T> from(URL propertiesURL) {
      return from(ClientInfoImpl.toProperties(propertiesURL));
    }
    @Override
    public FromOptions<T> from(Properties properties) {
      // Make a copy, so that this builder's subsequent methods don't mutate the
      // properties object provided by the caller
      this.properties = new Properties();
      this.properties.putAll(properties);
      return this;
    }
    @Override
    public ConnectionOptions<T> as(CharSequence username, CharSequence password) {
      setProperty(ClientProperty.AUTH_PRINCIPAL, username);
      ClientProperty.setPassword(properties, password);
      return this;
    }
    @Override
    public ConnectionOptions<T> as(CharSequence principal, Path keyTabFile) {
      setProperty(ClientProperty.AUTH_PRINCIPAL, principal);
      ClientProperty.setKerberosKeytab(properties, keyTabFile.toString());
      return this;
    }
    // Accepts an explicit token object; a destroyed token is rejected up front since it can
    // no longer authenticate.
    @Override
    public ConnectionOptions<T> as(CharSequence principal, AuthenticationToken token) {
      if (token.isDestroyed()) {
        throw new IllegalArgumentException("AuthenticationToken has been destroyed");
      }
      setProperty(ClientProperty.AUTH_PRINCIPAL, principal.toString());
      ClientProperty.setAuthenticationToken(properties, token);
      this.token = token;
      return this;
    }
    // Property-setting helpers; all values are stored as strings.
    public void setProperty(ClientProperty property, CharSequence value) {
      properties.setProperty(property.getKey(), value.toString());
    }
    public void setProperty(ClientProperty property, Long value) {
      setProperty(property, Long.toString(value));
    }
    public void setProperty(ClientProperty property, Integer value) {
      setProperty(property, Integer.toString(value));
    }
    @Override
    public ClientFactory<T> withUncaughtExceptionHandler(UncaughtExceptionHandler ueh) {
      this.ueh = ueh;
      return this;
    }
  }
  // Returns this client's ZooKeeper reader.
  public ZooReader getZooReader() {
    ensureOpen();
    return zooReader;
  }
  // Maximum idle time (ms) for pooled thrift transports, read from the RPC transport idle
  // timeout client property.
  protected long getTransportPoolMaxAgeMillis() {
    ensureOpen();
    return ClientProperty.RPC_TRANSPORT_IDLE_TIMEOUT.getTimeInMillis(getProperties());
  }
  // Lazily starts the shared thrift transport pool; synchronized so concurrent first calls
  // create only one pool. The pool re-reads the idle timeout via the supplied getter.
  public synchronized ThriftTransportPool getTransportPool() {
    ensureOpen();
    if (thriftTransportPool == null) {
      thriftTransportPool = ThriftTransportPool.startNew(this::getTransportPoolMaxAgeMillis);
    }
    return thriftTransportPool;
  }
}
| 9,876 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientServiceEnvironmentImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.util.ConfigurationImpl;
/**
 * Client-side {@link ServiceEnvironment}: answers configuration and class-loading requests
 * by delegating to a {@link ClientContext}.
 */
public class ClientServiceEnvironmentImpl implements ServiceEnvironment {
  private final ClientContext context;
  public ClientServiceEnvironmentImpl(ClientContext context) {
    this.context = context;
  }
  /** Snapshots the instance's system configuration into an immutable view. */
  @Override
  public Configuration getConfiguration() {
    try {
      var systemConfig = context.instanceOperations().getSystemConfiguration();
      return new ConfigurationImpl(new ConfigurationCopy(systemConfig));
    } catch (AccumuloException | AccumuloSecurityException e) {
      throw new RuntimeException("Error getting system configuration", e);
    }
  }
  /** Snapshots the given table's configuration into an immutable view. */
  @Override
  public Configuration getConfiguration(TableId tableId) {
    try {
      var tableConfig = context.tableOperations().getConfiguration(getTableName(tableId));
      return new ConfigurationImpl(new ConfigurationCopy(tableConfig));
    } catch (AccumuloException | TableNotFoundException e) {
      throw new RuntimeException("Error getting table configuration", e);
    }
  }
  /** Resolves a table id to its current name via the client context. */
  @Override
  public String getTableName(TableId tableId) throws TableNotFoundException {
    return context.getTableName(tableId);
  }
  /** Loads and instantiates the named class (no-arg constructor) as a subtype of base. */
  @Override
  public <T> T instantiate(String className, Class<T> base) throws ReflectiveOperationException {
    ClassLoader loader = ClientServiceEnvironmentImpl.class.getClassLoader();
    return loader.loadClass(className).asSubclass(base).getDeclaredConstructor().newInstance();
  }
  /** Table-scoped instantiation has no per-table classloader here; defers to the general form. */
  @Override
  public <T> T instantiate(TableId tableId, String className, Class<T> base)
      throws ReflectiveOperationException {
    return instantiate(className, base);
  }
}
| 9,877 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/LoadMappingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.bulk;
import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.util.AbstractMap;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.util.json.ByteArrayToBase64TypeAdapter;

import com.google.gson.Gson;
import com.google.gson.stream.JsonReader;
/**
* Iterator for reading the Bulk Load Mapping JSON.
*/
/**
 * Iterator for reading the Bulk Load Mapping JSON.
 *
 * <p>Streams {@code Bulk.Mapping} entries from a top-level JSON array, converting each into a
 * (KeyExtent, Files) pair for the given table. Callers must {@link #close()} the iterator to
 * release the underlying stream. Not thread-safe.
 */
public class LoadMappingIterator
    implements Iterator<Map.Entry<KeyExtent,Bulk.Files>>, AutoCloseable {
  private final TableId tableId;
  private final JsonReader reader;
  private static final Gson gson = ByteArrayToBase64TypeAdapter.createBase64Gson();
  // Optional old-name -> new-name translation applied to file names as entries are read.
  private Map<String,String> renameMap;
  LoadMappingIterator(TableId tableId, InputStream loadMappingFile) throws IOException {
    this.tableId = tableId;
    this.reader = new JsonReader(new BufferedReader(new InputStreamReader(loadMappingFile, UTF_8)));
    // Position the reader inside the top-level JSON array of mapping entries.
    this.reader.beginArray();
  }
  public void setRenameMap(Map<String,String> renameMap) {
    this.renameMap = renameMap;
  }
  @Override
  public void close() throws IOException {
    reader.close();
  }
  @Override
  public boolean hasNext() {
    try {
      return reader.hasNext();
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }
  @Override
  public Map.Entry<KeyExtent,Bulk.Files> next() {
    // Honor the Iterator contract: fail clearly at end of the array instead of letting the
    // JSON parser produce an obscure error or a null mapping.
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    Bulk.Mapping bm = gson.fromJson(reader, Bulk.Mapping.class);
    if (renameMap != null) {
      return new AbstractMap.SimpleEntry<>(bm.getKeyExtent(tableId),
          bm.getFiles().mapNames(renameMap));
    } else {
      return new AbstractMap.SimpleEntry<>(bm.getKeyExtent(tableId), bm.getFiles());
    }
  }
}
| 9,878 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/ConcurrentKeyExtentCache.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.bulk;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.stream.Stream;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.bulk.BulkImport.KeyExtentCache;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.TabletDeletedException;
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
/**
 * Caches tablet extents for a single table during bulk import, mapping rows to the tablet
 * (KeyExtent) that contains them. Cache misses are batched: concurrent callers queue their
 * rows, and whichever thread holds the object lock resolves all queued rows against the
 * metadata table in sorted order.
 */
class ConcurrentKeyExtentCache implements KeyExtentCache {
  private static Logger log = LoggerFactory.getLogger(ConcurrentKeyExtentCache.class);
  // Sentinel end row representing the last tablet (null end row); compared by identity in
  // the map's comparator and ordered after every real row.
  private static final Text MAX = new Text();
  // Rows queued for lookup by threads that missed the cache.
  private Set<Text> rowsToLookup = Collections.synchronizedSet(new HashSet<>());
  // Scratch list of rows drained from rowsToLookup; only used while holding the object lock.
  List<Text> lookupRows = new ArrayList<>();
  // Tablets keyed by end row (MAX for the last tablet); the comparator orders MAX last so
  // ceilingEntry(row) finds the candidate tablet for any row.
  private ConcurrentSkipListMap<Text,KeyExtent> extents = new ConcurrentSkipListMap<>((t1, t2) -> {
    return (t1 == t2) ? 0 : (t1 == MAX ? 1 : (t2 == MAX ? -1 : t1.compareTo(t2)));
  });
  private TableId tableId;
  private ClientContext ctx;
  ConcurrentKeyExtentCache(TableId tableId, ClientContext ctx) {
    this.tableId = tableId;
    this.ctx = ctx;
  }
  // Returns the cached extent containing row, or null on a miss.
  private KeyExtent getFromCache(Text row) {
    Entry<Text,KeyExtent> entry = extents.ceilingEntry(row);
    if (entry != null && entry.getValue().contains(row)) {
      return entry.getValue();
    }
    return null;
  }
  // True if exactly this extent is currently cached under its end row.
  private boolean inCache(KeyExtent e) {
    return Objects.equals(e, extents.get(e.endRow() == null ? MAX : e.endRow()));
  }
  @VisibleForTesting
  protected void updateCache(KeyExtent e) {
    // Clear any stale cached extents whose end rows fall within the new extent's row range
    // before inserting it, so overlapping (e.g. merged-away) tablets don't linger.
    Text prevRow = e.prevEndRow() == null ? new Text() : e.prevEndRow();
    Text endRow = e.endRow() == null ? MAX : e.endRow();
    extents.subMap(prevRow, e.prevEndRow() == null, endRow, true).clear();
    extents.put(endRow, e);
  }
  // Streams up to 100 consecutive tablets from the metadata table, starting with the tablet
  // overlapping the given row.
  @VisibleForTesting
  protected Stream<KeyExtent> lookupExtents(Text row) {
    return TabletsMetadata.builder(ctx).forTable(tableId).overlapping(row, true, null)
        .checkConsistency().fetch(PREV_ROW).build().stream().limit(100)
        .map(TabletMetadata::getExtent);
  }
  /**
   * Returns the extent containing the given row, consulting the metadata table on a cache
   * miss. Loops until the row is resolvable from the cache (a concurrent batch may resolve
   * it on our behalf).
   */
  @Override
  public KeyExtent lookup(Text row) {
    while (true) {
      KeyExtent ke = getFromCache(row);
      if (ke != null) {
        return ke;
      }
      // If a metadata lookup is currently in progress, then multiple threads can queue up their
      // rows. The next lookup will process all queued. Processing multiple at once can be more
      // efficient.
      rowsToLookup.add(row);
      synchronized (this) {
        // This check is done to avoid processing rowsToLookup when the current thread's row is in
        // the cache.
        ke = getFromCache(row);
        if (ke != null) {
          rowsToLookup.remove(row);
          return ke;
        }
        lookupRows.clear();
        synchronized (rowsToLookup) {
          // Gather all rows that were queued for lookup before this point in time.
          rowsToLookup.forEach(lookupRows::add);
          rowsToLookup.clear();
        }
        // Lookup rows in the metadata table in sorted order. This could possibly lead to less
        // metadata lookups.
        lookupRows.sort(Text::compareTo);
        for (Text lookupRow : lookupRows) {
          if (getFromCache(lookupRow) == null) {
            while (true) {
              try {
                Iterator<KeyExtent> iter = lookupExtents(lookupRow).iterator();
                // Cache extents until one is already cached; later extents in the stream
                // would have been cached by a previous batch.
                while (iter.hasNext()) {
                  KeyExtent ke2 = iter.next();
                  if (inCache(ke2)) {
                    break;
                  }
                  updateCache(ke2);
                }
                break;
              } catch (TabletDeletedException tde) {
                // tablets were merged away in the table, start over and try again
                log.debug("While trying to obtain a tablet location for bulk import, a tablet was "
                    + "deleted. If this was caused by a concurrent merge tablet "
                    + "operation, this is okay. Otherwise, it could be a problem.", tde);
              }
            }
          }
        }
      }
    }
  }
}
| 9,879 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.bulk;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.stream.Collectors.groupingBy;
import static org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.pathToCacheId;
import static org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.stream.Stream;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.TableOperations.ImportDestinationArguments;
import org.apache.accumulo.core.client.admin.TableOperations.ImportMappingOptions;
import org.apache.accumulo.core.clientImpl.AccumuloBulkMergeException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.TableOperationsImpl;
import org.apache.accumulo.core.clientImpl.bulk.Bulk.FileInfo;
import org.apache.accumulo.core.clientImpl.bulk.Bulk.Files;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.LoadPlan;
import org.apache.accumulo.core.data.LoadPlan.Destination;
import org.apache.accumulo.core.data.LoadPlan.RangeType;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.util.Retry;
import org.apache.accumulo.core.volume.VolumeConfiguration;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
public class BulkImport implements ImportDestinationArguments, ImportMappingOptions {
private static final Logger log = LoggerFactory.getLogger(BulkImport.class);
private boolean setTime = false;
private boolean ignoreEmptyDir = false;
private Executor executor = null;
private final String dir;
private int numThreads = -1;
private final ClientContext context;
private String tableName;
private LoadPlan plan = null;
  // Creates a bulk import operation for the given source directory; the directory itself is
  // validated later by checkPath() when load() runs.
  public BulkImport(String directory, ClientContext context) {
    this.context = context;
    this.dir = Objects.requireNonNull(directory);
  }
@Override
public ImportMappingOptions tableTime(boolean value) {
this.setTime = value;
return this;
}
@Override
public ImportMappingOptions ignoreEmptyDir(boolean ignore) {
this.ignoreEmptyDir = ignore;
return this;
}
@Override
public void load()
throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
TableId tableId = context.getTableId(tableName);
FileSystem fs = VolumeConfiguration.fileSystemForPath(dir, context.getHadoopConf());
Path srcPath = checkPath(fs, dir);
SortedMap<KeyExtent,Bulk.Files> mappings;
TableOperationsImpl tableOps = new TableOperationsImpl(context);
Map<String,String> tableProps = tableOps.getConfiguration(tableName);
int maxTablets = 0;
var propValue = tableProps.get(Property.TABLE_BULK_MAX_TABLETS.getKey());
if (propValue != null) {
maxTablets = Integer.parseInt(propValue);
}
Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
.incrementBy(100, MILLISECONDS).maxWait(2, MINUTES).backOffFactor(1.5)
.logInterval(3, MINUTES).createRetry();
// retry if a merge occurs
boolean shouldRetry = true;
while (shouldRetry) {
if (plan == null) {
mappings = computeMappingFromFiles(fs, tableId, tableProps, srcPath, maxTablets);
} else {
mappings = computeMappingFromPlan(fs, tableId, srcPath, maxTablets);
}
if (mappings.isEmpty()) {
if (ignoreEmptyDir == true) {
log.info("Attempted to import files from empty directory - {}. Zero files imported",
srcPath);
return;
} else {
throw new IllegalArgumentException("Attempted to import zero files from " + srcPath);
}
}
BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
try {
tableOps.doBulkFateOperation(args, tableName);
shouldRetry = false;
} catch (AccumuloBulkMergeException ae) {
if (plan != null) {
checkPlanForSplits(ae);
}
try {
retry.waitForNextAttempt(log, String.format("bulk import to %s(%s)", tableName, tableId));
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
log.info(ae.getMessage() + ". Retrying bulk import to " + tableName);
}
}
}
/**
* Check if splits were specified in plan when a concurrent merge occurred. If so, throw error
* back to user since retrying won't help. If not, then retry.
*/
private void checkPlanForSplits(AccumuloBulkMergeException abme) throws AccumuloException {
for (Destination des : plan.getDestinations()) {
if (des.getRangeType().equals(RangeType.TABLE)) {
throw new AccumuloException("The splits provided in Load Plan do not exist in " + tableName,
abme);
}
}
}
  /**
   * Check path of bulk directory and permissions
   *
   * <p>Qualifies the path (unless it already carries a scheme), verifies it refers to a
   * directory, and probes writability by creating and deleting a marker file.
   */
  private Path checkPath(FileSystem fs, String dir) throws IOException, AccumuloException {
    // A ':' in the path is taken to mean a filesystem scheme is already present.
    Path ret = dir.contains(":") ? new Path(dir) : fs.makeQualified(new Path(dir));
    try {
      if (!fs.getFileStatus(ret).isDirectory()) {
        throw new AccumuloException("Bulk import directory " + dir + " is not a directory!");
      }
      // Probe write permission: create a marker file, then remove it.
      Path tmpFile = new Path(ret, "isWritable");
      if (fs.createNewFile(tmpFile)) {
        fs.delete(tmpFile, true);
      } else {
        throw new AccumuloException("Bulk import directory " + dir + " is not writable.");
      }
    } catch (FileNotFoundException fnf) {
      throw new AccumuloException(
          "Bulk import directory " + dir + " does not exist or has bad permissions", fnf);
    }
    // TODO ensure dir does not contain bulk load mapping
    return ret;
  }
@Override
public ImportMappingOptions executor(Executor service) {
this.executor = Objects.requireNonNull(service);
return this;
}
@Override
public ImportMappingOptions threads(int numThreads) {
Preconditions.checkArgument(numThreads > 0, "Non positive number of threads given : %s",
numThreads);
this.numThreads = numThreads;
return this;
}
@Override
public ImportMappingOptions plan(LoadPlan plan) {
this.plan = Objects.requireNonNull(plan);
return this;
}
@Override
public ImportMappingOptions to(String tableName) {
this.tableName = EXISTING_TABLE_NAME.validate(tableName);
return this;
}
  // A single zero byte, appended to a row to form the immediately following row.
  private static final byte[] byte0 = {0};
  // Mutable long holder used to accumulate per-extent counts without boxing.
  private static class MLong {
    public MLong(long i) {
      l = i;
    }
    long l;
  }
  /**
   * Estimates how many bytes of {@code dataFile} fall within each of the given extents. The
   * file's index is scanned once; each index key is counted against every extent containing
   * its row, and the file size is then apportioned by each extent's fraction of the total
   * index entries. An extent matching no entries is credited one so its estimate is nonzero.
   * A single extent short-circuits to the whole file size.
   */
  public static Map<KeyExtent,Long> estimateSizes(AccumuloConfiguration acuConf,
      UnreferencedTabletFile dataFile, long fileSize, Collection<KeyExtent> extents, FileSystem ns,
      Cache<String,Long> fileLenCache, CryptoService cs) throws IOException {
    if (extents.size() == 1) {
      return Collections.singletonMap(extents.iterator().next(), fileSize);
    }
    long totalIndexEntries = 0;
    Map<KeyExtent,MLong> counts = new TreeMap<>();
    for (KeyExtent keyExtent : extents) {
      counts.put(keyExtent, new MLong(0));
    }
    Text row = new Text();
    try (FileSKVIterator index =
        FileOperations.getInstance().newIndexReaderBuilder().forFile(dataFile, ns, ns.getConf(), cs)
            .withTableConfiguration(acuConf).withFileLenCache(fileLenCache).build()) {
      while (index.hasTop()) {
        Key key = index.getTopKey();
        totalIndexEntries++;
        key.getRow(row);
        // Count this index key against every extent whose range contains its row.
        for (Entry<KeyExtent,MLong> entry : counts.entrySet()) {
          if (entry.getKey().contains(row)) {
            entry.getValue().l++;
          }
        }
        index.next();
      }
    }
    Map<KeyExtent,Long> results = new TreeMap<>();
    for (KeyExtent keyExtent : extents) {
      double numEntries = counts.get(keyExtent).l;
      if (numEntries == 0) {
        numEntries = 1;
      }
      long estSize = (long) ((numEntries / totalIndexEntries) * fileSize);
      results.put(keyExtent, estSize);
    }
    return results;
  }
  // Minimal cache abstraction mapping a row to the tablet (KeyExtent) containing it.
  public interface KeyExtentCache {
    KeyExtent lookup(Text row);
  }
  /**
   * Determines which tablets a sorted file overlaps by repeated seeking: seek to the current
   * row, look up the tablet containing the first key found, then continue from just past
   * that tablet's end row until the file is exhausted or the last tablet (null end row) is
   * reached.
   */
  public static List<KeyExtent> findOverlappingTablets(KeyExtentCache extentCache,
      FileSKVIterator reader) throws IOException {
    List<KeyExtent> result = new ArrayList<>();
    Collection<ByteSequence> columnFamilies = Collections.emptyList();
    Text row = new Text();
    while (true) {
      // Seek to the first key at or after row; no keys left means the file is exhausted.
      reader.seek(new Range(row, null), columnFamilies, false);
      if (!reader.hasTop()) {
        break;
      }
      row = reader.getTopKey().getRow();
      KeyExtent extent = extentCache.lookup(row);
      result.add(extent);
      row = extent.endRow();
      if (row != null) {
        // Continue from the first possible row after this tablet's end row.
        row = nextRow(row);
      } else {
        break;
      }
    }
    return result;
  }
private static Text nextRow(Text row) {
Text next = new Text(row);
next.append(byte0, 0, byte0.length);
return next;
}
  // Opens the file with a seeking reader and delegates to the iterator-based overload; the
  // reader is closed when the computation finishes.
  public static List<KeyExtent> findOverlappingTablets(ClientContext context,
      KeyExtentCache extentCache, UnreferencedTabletFile file, FileSystem fs,
      Cache<String,Long> fileLenCache, CryptoService cs) throws IOException {
    try (FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
        .forFile(file, fs, fs.getConf(), cs).withTableConfiguration(context.getConfiguration())
        .withFileLenCache(fileLenCache).seekToBeginning().build()) {
      return findOverlappingTablets(extentCache, reader);
    }
  }
private static Map<String,Long> getFileLenMap(List<FileStatus> statuses) {
HashMap<String,Long> fileLens = new HashMap<>();
for (FileStatus status : statuses) {
fileLens.put(status.getPath().getName(), status.getLen());
}
return fileLens;
}
private static Cache<String,Long> getPopulatedFileLenCache(Path dir, List<FileStatus> statuses) {
Map<String,Long> fileLens = getFileLenMap(statuses);
Map<String,Long> absFileLens = new HashMap<>();
fileLens.forEach((k, v) -> absFileLens.put(pathToCacheId(new Path(dir, k)), v));
Cache<String,Long> fileLenCache = Caffeine.newBuilder().build();
fileLenCache.putAll(absFileLens);
return fileLenCache;
}
  /**
   * Builds the tablet-to-files mapping from the user-supplied load plan. Validates that the
   * plan's file names exactly match the files present in the source directory, resolves each
   * file's destinations to tablet extents (consulting the metadata table for FILE-type
   * ranges), and splits each file's length evenly across the tablets it maps to.
   */
  private SortedMap<KeyExtent,Files> computeMappingFromPlan(FileSystem fs, TableId tableId,
      Path srcPath, int maxTablets)
      throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Map<String,List<Destination>> fileDestinations =
        plan.getDestinations().stream().collect(groupingBy(Destination::getFileName));
    List<FileStatus> statuses = filterInvalid(
        fs.listStatus(srcPath, p -> !p.getName().equals(Constants.BULK_LOAD_MAPPING)));
    Map<String,Long> fileLens = getFileLenMap(statuses);
    // The plan and the directory must agree on the exact set of file names.
    if (!fileDestinations.keySet().equals(fileLens.keySet())) {
      throw new IllegalArgumentException(
          "Load plan files differ from directory files, symmetric difference : "
              + Sets.symmetricDifference(fileDestinations.keySet(), fileLens.keySet()));
    }
    KeyExtentCache extentCache = new ConcurrentKeyExtentCache(tableId, context);
    // Pre-populate cache by looking up all end rows in sorted order. Doing this in sorted order
    // leverages read ahead.
    fileDestinations.values().stream().flatMap(List::stream)
        .filter(dest -> dest.getRangeType() == RangeType.FILE)
        .flatMap(dest -> Stream.of(dest.getStartRow(), dest.getEndRow())).filter(Objects::nonNull)
        .map(Text::new).sorted().distinct().forEach(extentCache::lookup);
    SortedMap<KeyExtent,Files> mapping = new TreeMap<>();
    for (Entry<String,List<Destination>> entry : fileDestinations.entrySet()) {
      String fileName = entry.getKey();
      List<Destination> destinations = entry.getValue();
      Set<KeyExtent> extents = mapDestinationsToExtents(tableId, extentCache, destinations);
      log.debug("The file {} mapped to {} tablets.", fileName, extents.size());
      // Enforce the per-file tablet cap (0 means unchecked here; see checkTabletCount).
      checkTabletCount(maxTablets, extents.size(), fileName);
      // Apportion the file's size evenly across the tablets it maps to.
      long estSize = (long) (fileLens.get(fileName) / (double) extents.size());
      for (KeyExtent keyExtent : extents) {
        mapping.computeIfAbsent(keyExtent, k -> new Files())
            .add(new FileInfo(fileName, estSize, 0));
      }
    }
    return mergeOverlapping(mapping);
  }
private Text toText(byte[] row) {
return row == null ? null : new Text(row);
}
  // Converts load-plan destinations into tablet extents. TABLE-type ranges become extents
  // directly from their start/end rows; FILE-type ranges are resolved by walking tablets
  // from the start row until the tablet containing the end row (or the last tablet, with a
  // null end row) is reached.
  private Set<KeyExtent> mapDestinationsToExtents(TableId tableId, KeyExtentCache kec,
      List<Destination> destinations) {
    Set<KeyExtent> extents = new HashSet<>();
    for (Destination dest : destinations) {
      if (dest.getRangeType() == RangeType.TABLE) {
        extents.add(new KeyExtent(tableId, toText(dest.getEndRow()), toText(dest.getStartRow())));
      } else if (dest.getRangeType() == RangeType.FILE) {
        Text startRow = new Text(dest.getStartRow());
        Text endRow = new Text(dest.getEndRow());
        KeyExtent extent = kec.lookup(startRow);
        extents.add(extent);
        while (!extent.contains(endRow) && extent.endRow() != null) {
          extent = kec.lookup(nextRow(extent.endRow()));
          extents.add(extent);
        }
      } else {
        throw new IllegalStateException();
      }
    }
    return extents;
  }
/**
 * Computes the file-to-tablet mapping for all import files under {@code dirPath}, supplying an
 * executor for the concurrent file examination. A caller-provided executor is used if one was
 * configured; otherwise a thread pool is created here and shut down when the computation
 * finishes.
 *
 * @throws IllegalArgumentException via checkTabletCount if a file maps to more than
 *         {@code maxTablets} tablets (when maxTablets > 0)
 */
private SortedMap<KeyExtent,Bulk.Files> computeMappingFromFiles(FileSystem fs, TableId tableId,
    Map<String,String> tableProps, Path dirPath, int maxTablets)
    throws IOException, AccumuloException, AccumuloSecurityException {
  Executor executor;
  ExecutorService service = null;
  // Only pools created in this method are tracked via 'service' so that they — and not a
  // caller-supplied executor — are shut down in the finally block below.
  if (this.executor != null) {
    executor = this.executor;
  } else if (numThreads > 0) {
    executor = service =
        context.threadPools().createFixedThreadPool(numThreads, "BulkImportThread", false);
  } else {
    // No explicit thread count; fall back to the client property for bulk load threads.
    String threads = context.getConfiguration().get(ClientProperty.BULK_LOAD_THREADS.getKey());
    executor = service = context.threadPools().createFixedThreadPool(
        ConfigurationTypeHelper.getNumThreads(threads), "BulkImportThread", false);
  }
  try {
    return computeFileToTabletMappings(fs, tableId, tableProps, dirPath, executor, context,
        maxTablets);
  } finally {
    if (service != null) {
      service.shutdown();
    }
  }
}
/**
 * Filters a directory listing down to the files eligible for bulk import. Directories, internal
 * bulk working files, and files without a recognized extension are dropped (with a log message
 * explaining why).
 */
public static List<FileStatus> filterInvalid(FileStatus[] files) {
  ArrayList<FileStatus> accepted = new ArrayList<>(files.length);
  for (FileStatus status : files) {
    Path path = status.getPath();
    String name = path.getName();
    if (status.isDirectory()) {
      log.debug("{} is a directory, ignoring.", path);
    } else if (FileOperations.getBulkWorkingFiles().contains(name)) {
      log.debug("{} is an internal working file, ignoring.", path);
    } else if (!FileOperations.getValidExtensions().contains(FilenameUtils.getExtension(name))) {
      log.warn("{} does not have a valid extension, ignoring", path);
    } else {
      accepted.add(status);
    }
  }
  return accepted;
}
/**
 * Examines each import file concurrently on the given executor and builds a mapping from each
 * overlapped tablet to the files (with estimated sizes) destined for it. Overlapping extents in
 * the result — possible if splits occur during examination — are merged before returning.
 *
 * @throws IllegalArgumentException via checkTabletCount if a file maps to more than
 *         {@code maxTablets} tablets (when maxTablets > 0)
 */
public SortedMap<KeyExtent,Bulk.Files> computeFileToTabletMappings(FileSystem fs, TableId tableId,
    Map<String,String> tableProps, Path dirPath, Executor executor, ClientContext context,
    int maxTablets) throws IOException, AccumuloException, AccumuloSecurityException {
  KeyExtentCache extentCache = new ConcurrentKeyExtentCache(tableId, context);
  // The load-mapping file itself is excluded from the listing; the rest are validated.
  List<FileStatus> files = filterInvalid(
      fs.listStatus(dirPath, p -> !p.getName().equals(Constants.BULK_LOAD_MAPPING)));
  // we know all of the file lens, so construct a cache and populate it in order to avoid later
  // trips to the namenode
  Cache<String,Long> fileLensCache = getPopulatedFileLenCache(dirPath, files);
  List<CompletableFuture<Map<KeyExtent,Bulk.FileInfo>>> futures = new ArrayList<>();
  CryptoService cs = CryptoFactoryLoader.getServiceForClientWithTable(
      context.instanceOperations().getSystemConfiguration(), tableProps, tableId);
  // Submit one task per file; each task finds the tablets the file overlaps and estimates the
  // portion of the file's size going to each tablet.
  for (FileStatus fileStatus : files) {
    UnreferencedTabletFile file = UnreferencedTabletFile.of(fs, fileStatus.getPath());
    CompletableFuture<Map<KeyExtent,Bulk.FileInfo>> future = CompletableFuture.supplyAsync(() -> {
      try {
        long t1 = System.currentTimeMillis();
        List<KeyExtent> extents =
            findOverlappingTablets(context, extentCache, file, fs, fileLensCache, cs);
        // make sure file isn't going to too many tablets
        checkTabletCount(maxTablets, extents.size(), file.toString());
        Map<KeyExtent,Long> estSizes = estimateSizes(context.getConfiguration(), file,
            fileStatus.getLen(), extents, fs, fileLensCache, cs);
        Map<KeyExtent,Bulk.FileInfo> pathLocations = new HashMap<>();
        for (KeyExtent ke : extents) {
          // Extents missing a size estimate default to 0.
          pathLocations.put(ke, new Bulk.FileInfo(file.getPath(), estSizes.getOrDefault(ke, 0L)));
        }
        long t2 = System.currentTimeMillis();
        log.debug("Mapped {} to {} tablets in {}ms", file, pathLocations.size(), t2 - t1);
        return pathLocations;
      } catch (Exception e) {
        // Wrap so the checked exception propagates out of the CompletableFuture.
        throw new CompletionException(e);
      }
    }, executor);
    futures.add(future);
  }
  // Collect all per-file results into a single tablet -> files mapping.
  SortedMap<KeyExtent,Bulk.Files> mappings = new TreeMap<>();
  for (CompletableFuture<Map<KeyExtent,Bulk.FileInfo>> future : futures) {
    try {
      Map<KeyExtent,Bulk.FileInfo> pathMapping = future.get();
      pathMapping.forEach((ext, fi) -> mappings.computeIfAbsent(ext, k -> new Files()).add(fi));
    } catch (InterruptedException e) {
      // Restore the interrupt flag before converting to an unchecked exception.
      Thread.currentThread().interrupt();
      throw new IllegalStateException(e);
    } catch (ExecutionException e) {
      throw new IllegalStateException(e);
    }
  }
  return mergeOverlapping(mappings);
}
// This method handles the case of splits happening while files are being examined. It merges
// smaller tablets into large tablets. The passed-in map is modified in place and returned.
// If two extents overlap but neither contains the other (e.g. due to a concurrent merge
// operation), the mapping cannot be reconciled and an IllegalStateException is thrown.
static SortedMap<KeyExtent,Bulk.Files>
    mergeOverlapping(SortedMap<KeyExtent,Bulk.Files> mappings) {
  // Snapshot the key set, since entries may be removed from 'mappings' while iterating.
  List<KeyExtent> extents = new ArrayList<>(mappings.keySet());
  for (KeyExtent ke : extents) {
    Set<KeyExtent> overlapping = KeyExtent.findOverlapping(ke, mappings);
    for (KeyExtent oke : overlapping) {
      if (ke.equals(oke)) {
        continue;
      }
      if (ke.contains(oke)) {
        // Fold the smaller tablet's files into the containing tablet and drop its entry.
        mappings.get(ke).merge(mappings.remove(oke));
      } else if (!oke.contains(ke)) {
        throw new IllegalStateException("Error during bulk import: Unable to merge overlapping "
            + "tablets where neither tablet contains the other. This may be caused by "
            + "a concurrent merge. Key extents " + oke + " and " + ke + " overlap, but "
            + "neither contains the other.");
      }
    }
  }
  return mappings;
}
/**
 * Fails the import if a single file maps to more tablets than allowed. A non-positive
 * {@code tabletMaxSize} disables the check.
 */
private void checkTabletCount(int tabletMaxSize, int tabletCount, String file) {
  if (tabletMaxSize <= 0) {
    return;
  }
  if (tabletCount > tabletMaxSize) {
    throw new IllegalArgumentException("The file " + file + " attempted to import to "
        + tabletCount + " tablets. Max tablets allowed set to " + tabletMaxSize);
  }
}
}
| 9,880 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/Bulk.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.bulk;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import com.google.common.base.Preconditions;
/**
 * Holder for the value classes making up a bulk import load mapping. The nested classes marked
 * with a WARNING are serialized to/from JSON (see BulkSerialize), so their fields and field names
 * must remain stable.
 */
public class Bulk {

  /**
   * WARNING : do not change this class, its used for serialization to Json
   */
  public static class Mapping {
    private Tablet tablet;
    private Collection<FileInfo> files;

    public Mapping(KeyExtent tablet, Files files) {
      this.tablet = toTablet(tablet);
      this.files = files.files.values();
    }

    public Tablet getTablet() {
      return tablet;
    }

    public KeyExtent getKeyExtent(TableId tableId) {
      return tablet.toKeyExtent(tableId);
    }

    // Returns a new Files wrapper each call; mutations to it do not affect this Mapping.
    public Files getFiles() {
      return new Files(files);
    }
  }

  /**
   * WARNING : do not change this class, its used for serialization to Json
   */
  public static class Tablet {
    // Serialized row boundaries; null represents an open-ended boundary.
    private byte[] endRow;
    private byte[] prevEndRow;

    public Tablet(Text endRow, Text prevEndRow) {
      this.endRow = endRow == null ? null : TextUtil.getBytes(endRow);
      this.prevEndRow = prevEndRow == null ? null : TextUtil.getBytes(prevEndRow);
    }

    public KeyExtent toKeyExtent(TableId tableId) {
      return Bulk.toKeyExtent(tableId, this);
    }

    // Returns a defensive copy; null means the last tablet (no end row).
    public Text getEndRow() {
      if (endRow == null) {
        return null;
      }
      return new Text(endRow);
    }

    // Returns a defensive copy; null means the first tablet (no prev end row).
    public Text getPrevEndRow() {
      if (prevEndRow == null) {
        return null;
      }
      return new Text(prevEndRow);
    }

    @Override
    public String toString() {
      return getEndRow() + ";" + getPrevEndRow();
    }
  }

  /**
   * WARNING : do not change this class, its used for serialization to Json
   */
  public static class FileInfo {
    final String name;
    final long estSize;
    final long estEntries;

    public FileInfo(String fileName, long estFileSize, long estNumEntries) {
      this.name = fileName;
      this.estSize = estFileSize;
      this.estEntries = estNumEntries;
    }

    public FileInfo(Path path, long estSize) {
      this(path.getName(), estSize, 0);
    }

    // Combines two infos for the same file, summing the estimates. Both must share a name.
    static FileInfo merge(FileInfo fi1, FileInfo fi2) {
      Preconditions.checkArgument(fi1.name.equals(fi2.name));
      return new FileInfo(fi1.name, fi1.estSize + fi2.estSize, fi1.estEntries + fi2.estEntries);
    }

    public String getFileName() {
      return name;
    }

    public long getEstFileSize() {
      return estSize;
    }

    public long getEstNumEntries() {
      return estEntries;
    }

    @Override
    public String toString() {
      return String.format("file:%s estSize:%d estEntries:%s", name, estSize, estEntries);
    }

    @Override
    public boolean equals(Object o) {
      if (o == this) {
        return true;
      }
      if (!(o instanceof FileInfo)) {
        return false;
      }
      FileInfo other = (FileInfo) o;
      return this.name.equals(other.name) && this.estSize == other.estSize
          && this.estEntries == other.estEntries;
    }

    @Override
    public int hashCode() {
      return Objects.hash(name, estSize, estEntries);
    }
  }

  /**
   * A collection of {@link FileInfo} keyed by file name; at most one entry per file name.
   */
  public static class Files implements Iterable<FileInfo> {
    // file name -> info; package access is used directly by Mapping above.
    Map<String,FileInfo> files = new HashMap<>();

    public Files(Collection<FileInfo> files) {
      files.forEach(this::add);
    }

    public Files() {}

    // Adds a file, rejecting duplicates by name.
    public void add(FileInfo fi) {
      if (files.putIfAbsent(fi.name, fi) != null) {
        throw new IllegalArgumentException("File already present " + fi.name);
      }
    }

    public FileInfo get(String fileName) {
      return files.get(fileName);
    }

    // Returns a new Files with each file name replaced via the rename map; this instance is
    // unchanged. Assumes every current name has an entry in 'renames' — TODO confirm.
    public Files mapNames(Map<String,String> renames) {
      Files renamed = new Files();

      files.forEach((k, v) -> {
        String newName = renames.get(k);
        FileInfo nfi = new FileInfo(newName, v.estSize, v.estEntries);
        renamed.files.put(newName, nfi);
      });

      return renamed;
    }

    // Folds another Files into this one, summing estimates for files present in both.
    void merge(Files other) {
      other.files.forEach((k, v) -> {
        files.merge(k, v, FileInfo::merge);
      });
    }

    public int getSize() {
      return this.files.size();
    }

    @Override
    public Iterator<FileInfo> iterator() {
      return files.values().iterator();
    }

    @Override
    public boolean equals(Object o) {
      if (o == this) {
        return true;
      }
      if (!(o instanceof Files)) {
        return false;
      }
      Files other = (Files) o;
      return this.files.equals(other.files);
    }

    @Override
    public int hashCode() {
      return files.hashCode();
    }

    @Override
    public String toString() {
      return files.toString();
    }
  }

  public static Tablet toTablet(KeyExtent keyExtent) {
    return new Tablet(keyExtent.endRow(), keyExtent.prevEndRow());
  }

  public static KeyExtent toKeyExtent(TableId tableId, Tablet tablet) {
    return new KeyExtent(tableId, tablet.getEndRow(), tablet.getPrevEndRow());
  }
}
| 9,881 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkSerialize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.bulk;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.LazySingletons.GSON;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.bulk.Bulk.Files;
import org.apache.accumulo.core.clientImpl.bulk.Bulk.Mapping;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.util.json.ByteArrayToBase64TypeAdapter;
import org.apache.hadoop.fs.Path;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import com.google.gson.stream.JsonWriter;
/**
* Place for all bulk import serialization code. For the objects being serialized see {@link Bulk}
*/
public class BulkSerialize {

  // Gson with a byte[] <-> Base64 adapter, needed for the row boundaries in Bulk.Tablet.
  private static final Gson gson = ByteArrayToBase64TypeAdapter.createBase64Gson();

  /** Abstraction over creating an output stream for a path (e.g. a FileSystem). */
  public interface Output {
    OutputStream create(Path path) throws IOException;
  }

  /** Abstraction over opening an input stream for a path (e.g. a FileSystem). */
  public interface Input {
    InputStream open(Path path) throws IOException;
  }

  /**
   * Serialize bulk load mapping to {@value Constants#BULK_LOAD_MAPPING}
   */
  public static void writeLoadMapping(SortedMap<KeyExtent,Bulk.Files> loadMapping, String sourceDir,
      Output output) throws IOException {
    final Path lmFile = new Path(sourceDir, Constants.BULK_LOAD_MAPPING);

    try (OutputStream fsOut = output.create(lmFile); JsonWriter writer =
        new JsonWriter(new BufferedWriter(new OutputStreamWriter(fsOut, UTF_8)))) {
      writer.setIndent(" ");
      writer.beginArray();
      Set<Entry<KeyExtent,Files>> es = loadMapping.entrySet();
      for (Entry<KeyExtent,Files> entry : es) {
        Mapping mapping = new Bulk.Mapping(entry.getKey(), entry.getValue());
        gson.toJson(mapping, Mapping.class, writer);
      }
      writer.endArray();
    }
  }

  /**
   * Read Json array of Bulk.Mapping into LoadMappingIterator
   */
  public static LoadMappingIterator readLoadMapping(String bulkDir, TableId tableId, Input input)
      throws IOException {
    final Path lmFile = new Path(bulkDir, Constants.BULK_LOAD_MAPPING);
    return new LoadMappingIterator(tableId, input.open(lmFile));
  }

  /**
   * Writes rename file to JSON. This file maps all the old names to the new names for the
   * BulkImportMove FATE operation.
   */
  public static void writeRenameMap(Map<String,String> oldToNewNameMap, String bulkDir,
      Output output) throws IOException {
    final Path renamingFile = new Path(bulkDir, Constants.BULK_RENAME_FILE);
    try (OutputStream fsOut = output.create(renamingFile);
        // Explicit UTF-8, matching writeLoadMapping; previously this used the platform default
        // charset, which made the file's encoding environment-dependent.
        BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(fsOut, UTF_8))) {
      GSON.get().toJson(oldToNewNameMap, writer);
    }
  }

  /**
   * Reads the serialized rename file. This file maps all the old names to the new names for the
   * BulkImportMove FATE operation.
   */
  public static Map<String,String> readRenameMap(String bulkDir, Input input) throws IOException {
    final Path renamingFile = new Path(bulkDir, Constants.BULK_RENAME_FILE);
    Map<String,String> oldToNewNameMap;
    try (InputStream fis = input.open(renamingFile);
        // Explicit UTF-8 to match writeRenameMap above.
        BufferedReader reader = new BufferedReader(new InputStreamReader(fis, UTF_8))) {
      oldToNewNameMap = gson.fromJson(reader, new TypeToken<Map<String,String>>() {}.getType());
    }
    return oldToNewNameMap;
  }

  /**
   * Read in both maps and change all the file names in the mapping to the new names. This is needed
   * because the load mapping file was written with the original file names before they were moved
   * by BulkImportMove
   */
  public static LoadMappingIterator getUpdatedLoadMapping(String bulkDir, TableId tableId,
      Input input) throws IOException {
    Map<String,String> renames = readRenameMap(bulkDir, input);
    LoadMappingIterator lmi = readLoadMapping(bulkDir, tableId, input);
    lmi.setRenameMap(renames);
    return lmi;
  }
}
| 9,882 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/lexicoder/ByteUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.lexicoder;
import java.util.ArrayList;
/**
 * Byte-array escaping, splitting, and joining utilities used by lexicoders. The escape scheme
 * reserves 0x00 as a field delimiter: 0x00 is escaped as 0x01 0x01 and 0x01 as 0x01 0x02, so
 * escaped fields never contain a raw 0x00.
 */
public class ByteUtils {

  /**
   * Escapes 0x00 with 0x01 0x01 and 0x01 with 0x01 0x02. If the input contains neither byte, the
   * input array itself is returned (no copy is made).
   */
  public static byte[] escape(byte[] in) {
    int escapeCount = 0;
    for (byte value : in) {
      if (value == 0x00 || value == 0x01) {
        escapeCount++;
      }
    }

    if (escapeCount == 0) {
      return in;
    }

    byte[] ret = new byte[escapeCount + in.length];
    int index = 0;

    for (byte b : in) {
      switch (b) {
        case 0x00:
          ret[index++] = 0x01;
          ret[index++] = 0x01;
          break;
        case 0x01:
          ret[index++] = 0x01;
          ret[index++] = 0x02;
          break;
        default:
          ret[index++] = b;
      }
    }

    return ret;
  }

  /**
   * Unescapes 0x01 0x01 to 0x00 and 0x01 0x02 to 0x01 (the inverse of {@link #escape(byte[])}).
   * If the input contains no escape bytes, the input array itself is returned (no copy is made).
   *
   * @throws IllegalArgumentException if the input ends with a dangling 0x01 escape byte
   */
  public static byte[] unescape(byte[] in) {
    int escapeCount = 0;
    for (int i = 0; i < in.length; i++) {
      if (in[i] == 0x01) {
        escapeCount++;
        i++; // skip the escaped byte that follows
      }
    }

    if (escapeCount == 0) {
      return in;
    }

    byte[] ret = new byte[in.length - escapeCount];

    int index = 0;
    for (int i = 0; i < in.length; i++) {
      if (in[i] == 0x01) {
        i++;
        if (i >= in.length) {
          throw new IllegalArgumentException("Bad bytes when attempting to unescape. Expected "
              + "more bytes after last byte " + String.format("x%02x", in[in.length - 1]));
        }
        // 0x01 -> 0x00 and 0x02 -> 0x01; decode by subtracting one from the escaped byte.
        ret[index++] = (byte) (in[i] - 1);
      } else {
        ret[index++] = in[i];
      }
    }

    return ret;
  }

  /**
   * Splits a byte array by 0x00
   */
  public static byte[][] split(byte[] data) {
    return split(data, 0, data.length);
  }

  /**
   * Splits the given range of a byte array by 0x00. N delimiters always produce N+1 parts, so
   * leading, trailing, or adjacent delimiters yield empty parts.
   */
  public static byte[][] split(byte[] data, int dataOffset, int len) {
    ArrayList<Integer> offsets = new ArrayList<>();

    for (int i = dataOffset; i < (dataOffset + len); i++) {
      if (data[i] == 0x00) {
        offsets.add(i);
      }
    }

    // Sentinel end offset so the loop below also emits the final part.
    offsets.add(dataOffset + len);

    byte[][] ret = new byte[offsets.size()][];

    int index = dataOffset;
    for (int i = 0; i < offsets.size(); i++) {
      ret[i] = new byte[offsets.get(i) - index];
      System.arraycopy(data, index, ret[i], 0, ret[i].length);
      index = offsets.get(i) + 1; // skip past the delimiter
    }

    return ret;
  }

  /**
   * Concatenates byte arrays with 0x00 as a delimiter. Calling with no arguments returns an empty
   * array (previously this threw NegativeArraySizeException).
   */
  public static byte[] concat(byte[]... fields) {
    if (fields.length == 0) {
      return new byte[0];
    }

    int len = 0;
    for (byte[] field : fields) {
      len += field.length;
    }

    // One delimiter between each pair of fields.
    byte[] ret = new byte[len + fields.length - 1];
    int index = 0;

    for (byte[] field : fields) {
      System.arraycopy(field, 0, ret, index, field.length);
      index += field.length;
      if (index < ret.length) {
        ret[index++] = 0x00;
      }
    }

    return ret;
  }
}
| 9,883 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/clientImpl/lexicoder/FixedByteArrayOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl.lexicoder;
import java.io.OutputStream;
/**
 * An {@link OutputStream} that writes into a caller-supplied, fixed-length byte array. Unlike
 * {@link java.io.ByteArrayOutputStream}, the backing array never grows; writing past its end
 * fails with an {@link ArrayIndexOutOfBoundsException}.
 */
public class FixedByteArrayOutputStream extends OutputStream {

  // Next write position within the backing array.
  private int i;
  byte[] out;

  public FixedByteArrayOutputStream(byte[] out) {
    this.out = out;
  }

  @Override
  public void write(int b) {
    // Store the low-order byte and advance the position.
    out[i] = (byte) b;
    i++;
  }

  @Override
  public void write(byte[] b, int off, int len) {
    // Bulk copy into the backing array at the current position.
    System.arraycopy(b, off, out, i, len);
    i += len;
  }
}
| 9,884 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/sample | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/sample/impl/DataoutputHasher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.sample.impl;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.DataOutput;
import com.google.common.hash.Hasher;
public class DataoutputHasher implements DataOutput {
private Hasher hasher;
public DataoutputHasher(Hasher hasher) {
this.hasher = hasher;
}
@Override
public void write(int b) {
hasher.putByte((byte) (0xff & b));
}
@Override
public void write(byte[] b) {
hasher.putBytes(b);
}
@Override
public void write(byte[] b, int off, int len) {
hasher.putBytes(b, off, len);
}
@Override
public void writeBoolean(boolean v) {
hasher.putBoolean(v);
}
@Override
public void writeByte(int v) {
hasher.putByte((byte) (0xff & v));
}
@Override
public void writeShort(int v) {
hasher.putShort((short) (0xffff & v));
}
@Override
public void writeChar(int v) {
hasher.putChar((char) v);
}
@Override
public void writeInt(int v) {
hasher.putInt(v);
}
@Override
public void writeLong(long v) {
hasher.putLong(v);
}
@Override
public void writeFloat(float v) {
hasher.putDouble(v);
}
@Override
public void writeDouble(double v) {
hasher.putDouble(v);
}
@Override
public void writeBytes(String s) {
for (int i = 0; i < s.length(); i++) {
hasher.putByte((byte) (0xff & s.charAt(i)));
}
}
@Override
public void writeChars(String s) {
hasher.putString(s, UTF_8);
}
@Override
public void writeUTF(String s) {
hasher.putInt(s.length());
hasher.putBytes(s.getBytes(UTF_8));
}
}
| 9,885 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/sample | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/sample/impl/SamplerConfigurationImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.sample.impl;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.tabletscan.thrift.TSamplerConfiguration;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.io.Writable;
/**
 * Internal representation of a sampler configuration: the sampler class name plus its options.
 * Implements {@link Writable} because this is persisted inside RFiles; the serialized form must
 * remain stable (see {@link #write(DataOutput)}). Also converts to/from the public
 * {@link SamplerConfiguration}, Thrift, and table-property representations.
 */
public class SamplerConfigurationImpl implements Writable {
  private String className;
  private Map<String,String> options;

  public SamplerConfigurationImpl(DataInput in) throws IOException {
    readFields(in);
  }

  public SamplerConfigurationImpl(SamplerConfiguration sc) {
    this.className = sc.getSamplerClassName();
    this.options = new HashMap<>(sc.getOptions());
  }

  // Note: keeps a reference to the passed-in map rather than copying it.
  public SamplerConfigurationImpl(String className, Map<String,String> options) {
    this.className = className;
    this.options = options;
  }

  // No-arg constructor for Writable-style deserialization via readFields.
  public SamplerConfigurationImpl() {}

  public String getClassName() {
    return className;
  }

  public Map<String,String> getOptions() {
    return Collections.unmodifiableMap(options);
  }

  @Override
  public int hashCode() {
    return 31 * className.hashCode() + options.hashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (o instanceof SamplerConfigurationImpl) {
      SamplerConfigurationImpl osc = (SamplerConfigurationImpl) o;
      return className.equals(osc.className) && options.equals(osc.options);
    }

    return false;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    // The Writable serialization methods for this class are called by RFile and therefore must be
    // very stable. An alternative way to serialize this class is to
    // use Thrift. That was not used here in order to avoid making RFile depend on Thrift.

    // versioning info: a single byte (write(int) emits the low 8 bits), read back by
    // readFields via readByte
    out.write(1);

    out.writeUTF(className);
    out.writeInt(options.size());

    for (Entry<String,String> entry : options.entrySet()) {
      out.writeUTF(entry.getKey());
      out.writeUTF(entry.getValue());
    }
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    // Only version 1 exists; anything else means corrupt or incompatible data.
    int version = in.readByte();

    if (version != 1) {
      throw new IllegalArgumentException("Unexpected version " + version);
    }

    className = in.readUTF();

    options = new HashMap<>();

    int num = in.readInt();

    for (int i = 0; i < num; i++) {
      String key = in.readUTF();
      String val = in.readUTF();
      options.put(key, val);
    }
  }

  public SamplerConfiguration toSamplerConfiguration() {
    SamplerConfiguration sc = new SamplerConfiguration(className);
    sc.setOptions(options);
    return sc;
  }

  /**
   * Converts this configuration to table property key/value pairs (option properties first, then
   * the sampler class property).
   */
  public List<Pair<String,String>> toTableProperties() {
    ArrayList<Pair<String,String>> props = new ArrayList<>();

    for (Entry<String,String> entry : options.entrySet()) {
      props
          .add(new Pair<>(Property.TABLE_SAMPLER_OPTS.getKey() + entry.getKey(), entry.getValue()));
    }

    // intentionally added last, so its set last
    props.add(new Pair<>(Property.TABLE_SAMPLER.getKey(), className));

    return props;
  }

  // Same as toTableProperties, but as an insertion-ordered map.
  public Map<String,String> toTablePropertiesMap() {
    LinkedHashMap<String,String> propsMap = new LinkedHashMap<>();
    for (Pair<String,String> pair : toTableProperties()) {
      propsMap.put(pair.getFirst(), pair.getSecond());
    }

    return propsMap;
  }

  /**
   * Builds a configuration from the table sampler properties, or returns null if no sampler is
   * configured.
   */
  public static SamplerConfigurationImpl newSamplerConfig(AccumuloConfiguration acuconf) {
    String className = acuconf.get(Property.TABLE_SAMPLER);

    if (className == null || className.equals("")) {
      return null;
    }

    Map<String,String> rawOptions = acuconf.getAllPropertiesWithPrefix(Property.TABLE_SAMPLER_OPTS);
    Map<String,String> options = new HashMap<>();

    for (Entry<String,String> entry : rawOptions.entrySet()) {
      // Strip the property prefix, leaving just the option name.
      String key = entry.getKey().substring(Property.TABLE_SAMPLER_OPTS.getKey().length());
      options.put(key, entry.getValue());
    }

    return new SamplerConfigurationImpl(className, options);
  }

  @Override
  public String toString() {
    return className + " " + options;
  }

  public static TSamplerConfiguration toThrift(SamplerConfiguration samplerConfig) {
    if (samplerConfig == null) {
      return null;
    }

    return new TSamplerConfiguration(samplerConfig.getSamplerClassName(),
        samplerConfig.getOptions());
  }

  public static SamplerConfiguration fromThrift(TSamplerConfiguration tsc) {
    if (tsc == null) {
      return null;
    }

    return new SamplerConfiguration(tsc.getClassName()).setOptions(tsc.getOptions());
  }
}
| 9,886 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/sample | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/sample/impl/SamplerFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.sample.impl;
import java.io.IOException;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Instantiates, validates, and initializes {@link Sampler} implementations by class name.
 */
public class SamplerFactory {

  private static final Logger log = LoggerFactory.getLogger(SamplerFactory.class);

  /**
   * Creates a sampler from the given configuration. When {@code useAccumuloStart} is true the
   * class is loaded through the (possibly table-context) Accumulo class loader; otherwise this
   * class's own class loader is used. Returns null (after logging) if the sampler cannot be
   * loaded, constructed, validated, or initialized.
   */
  public static Sampler newSampler(SamplerConfigurationImpl config, AccumuloConfiguration acuconf,
      boolean useAccumuloStart) {
    String context = ClassLoaderUtil.tableContext(acuconf);
    try {
      Class<? extends Sampler> clazz;
      if (useAccumuloStart) {
        clazz = ClassLoaderUtil.loadClass(context, config.getClassName(), Sampler.class);
      } else {
        clazz = SamplerFactory.class.getClassLoader().loadClass(config.getClassName())
            .asSubclass(Sampler.class);
      }

      Sampler sampler = clazz.getDeclaredConstructor().newInstance();
      sampler.validateOptions(config.getOptions());
      sampler.init(config.toSamplerConfiguration());
      return sampler;
    } catch (ReflectiveOperationException | RuntimeException e) {
      log.error("Cannot initialize sampler {}: {}", config.getClassName(), e.getMessage(), e);
      return null;
    }
  }

  /** Convenience overload that always loads through the Accumulo class loader. */
  public static Sampler newSampler(SamplerConfigurationImpl config, AccumuloConfiguration acuconf)
      throws IOException {
    return newSampler(config, acuconf, true);
  }
}
| 9,887 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/gc/ReferenceDirectory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.gc;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
/**
* A GC reference to a Tablet directory, like t-0003.
*/
public class ReferenceDirectory extends ReferenceFile {

  // Directory name within the table, e.g. t-0003; validated in the constructor.
  private final String tabletDir;

  public ReferenceDirectory(TableId tableId, String dirName) {
    super(tableId, dirName, false);
    MetadataSchema.TabletsSection.ServerColumnFamily.validateDirCol(dirName);
    this.tabletDir = dirName;
  }

  @Override
  public boolean isDirectory() {
    return true;
  }

  public String getTabletDir() {
    return tabletDir;
  }

  /**
   * A Tablet directory should have a metadata entry equal to the dirName.
   */
  @Override
  public String getMetadataPath() {
    if (tabletDir.equals(metadataPath)) {
      return metadataPath;
    }
    throw new IllegalStateException(
        "Tablet dir " + tabletDir + " is not equal to metadataPath: " + metadataPath);
  }
}
| 9,888 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/gc/ReferenceFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.gc;
import java.util.Objects;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.ScanServerRefTabletFile;
import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.hadoop.fs.Path;
/**
 * A GC reference to a single file, used for streaming candidates and for delete markers. The
 * directory variant is implemented by a subclass.
 */
public class ReferenceFile implements Reference, Comparable<ReferenceFile> {

  // table the file belongs to, e.g. "2a" in "hdfs://1.2.3.4/accumulo/tables/2a/t-0003"
  public final TableId tableId;
  public final boolean isScan;

  // the exact file reference string as stored in the metadata table
  protected final String metadataPath;

  protected ReferenceFile(TableId tableId, String metadataPath, boolean isScan) {
    this.tableId = Objects.requireNonNull(tableId);
    this.metadataPath = Objects.requireNonNull(metadataPath);
    this.isScan = isScan;
  }

  /** Creates a non-scan reference from a stored tablet file. */
  public static ReferenceFile forFile(TableId tableId, StoredTabletFile tabletFile) {
    return new ReferenceFile(tableId, tabletFile.getMetadataPath(), false);
  }

  /** Creates a non-scan reference from a raw path. */
  public static ReferenceFile forFile(TableId tableId, Path metadataPathPath) {
    return new ReferenceFile(tableId, metadataPathPath.toString(), false);
  }

  /** Creates a scan reference from a scan server ref file. */
  public static ReferenceFile forScan(TableId tableId, ScanServerRefTabletFile tabletFile) {
    return new ReferenceFile(tableId, tabletFile.getNormalizedPathStr(), true);
  }

  /** Creates a scan reference from a stored tablet file. */
  public static ReferenceFile forScan(TableId tableId, StoredTabletFile tabletFile) {
    return new ReferenceFile(tableId, tabletFile.getMetadataPath(), true);
  }

  /** Creates a scan reference from a raw path. */
  public static ReferenceFile forScan(TableId tableId, Path metadataPathPath) {
    return new ReferenceFile(tableId, metadataPathPath.toString(), true);
  }

  @Override
  public boolean isDirectory() {
    return false;
  }

  @Override
  public boolean isScan() {
    return isScan;
  }

  @Override
  public TableId getTableId() {
    return tableId;
  }

  @Override
  public String getMetadataPath() {
    return metadataPath;
  }

  // Ordering is by metadataPath alone; when this object equals that, the paths are equal, so
  // this is equivalent to the previous equals-first implementation.
  @Override
  public int compareTo(ReferenceFile that) {
    return metadataPath.compareTo(that.metadataPath);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj == null || obj.getClass() != getClass()) {
      return false;
    }
    return metadataPath.equals(((ReferenceFile) obj).metadataPath);
  }

  @Override
  public int hashCode() {
    return metadataPath.hashCode();
  }

  @Override
  public String toString() {
    return "Reference [id=" + tableId + ", ref=" + metadataPath + "]";
  }
}
| 9,889 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/gc/GcCandidate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.gc;
import java.lang.Object;
import java.util.Objects;
public class GcCandidate implements Comparable<GcCandidate> {
private final long uid;
private final String path;
public GcCandidate(String path, long uid) {
this.path = path;
this.uid = uid;
}
public String getPath() {
return path;
}
public long getUid() {
return uid;
}
@Override
public int hashCode() {
return Objects.hash(path, uid);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof GcCandidate) {
GcCandidate candidate = (GcCandidate) obj;
return this.uid == candidate.getUid() && this.path.equals(candidate.getPath());
}
return false;
}
@Override
public int compareTo(GcCandidate candidate) {
var cmp = this.path.compareTo(candidate.getPath());
if (cmp == 0) {
return Long.compare(this.uid, candidate.getUid());
} else {
return cmp;
}
}
@Override
public String toString() {
return path + ", UUID: " + uid;
}
}
| 9,890 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/gc/Reference.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.gc;
import org.apache.accumulo.core.data.TableId;
/**
 * A single reference that can defeat a GC candidate. Conceptually the garbage collector consumes
 * two inputs: candidates (files that could possibly be deleted) and references (uses of files
 * that prevent deletion). A candidate is only removed when no reference defeats it.
 */
public interface Reference {
/**
 * True only when this reference points at a tablet directory rather than a file.
 */
boolean isDirectory();
/**
 * True only when this reference originates from an active scan.
 */
boolean isScan();
/**
 * The {@link TableId} of the table this reference belongs to.
 */
TableId getTableId();
/**
 * The path exactly as stored in the metadata table for this file or directory. A file is read
 * from the Tablet "file" column family:
 * {@link org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily}
 * A directory is read from the "srv:dir" column family:
 * {@link org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily}
 * A scan reference is read from the Tablet "scan" column family:
 * {@link org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily}
 */
String getMetadataPath();
}
| 9,891 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/tabletserver | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.tabletserver.log;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.UUID;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
import com.google.common.net.HostAndPort;
/**
 * An entry for a write-ahead log file, identified by a path whose last two components are a
 * tserver address ({@code host:port}) and a UUID file name. The path is validated on
 * construction.
 */
public class LogEntry {

  private final String filePath;

  public LogEntry(String filePath) {
    validateFilePath(filePath);
    this.filePath = filePath;
  }

  public String getFilePath() {
    return filePath;
  }

  /**
   * Validates the expected format of the file path: a tserver (host:port) component followed by
   * a UUID file name, e.g. {@code localhost:1234/927ba659-d109-4bce-b0a5-bcbbcb9942a2}.
   *
   * @param filePath path to validate
   * @throws IllegalArgumentException if the filepath is invalid
   */
  private static void validateFilePath(String filePath) {
    String[] tokens = filePath.split("/");
    if (tokens.length < 2) {
      throw new IllegalArgumentException(
          "Invalid filePath format. The path should at least contain tserver/UUID.");
    }
    String server = tokens[tokens.length - 2];
    String uuid = tokens[tokens.length - 1];
    try {
      var ignored = HostAndPort.fromString(server);
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException(
          "Invalid tserver format in filePath. Expected format: host:port. Found '" + server
              + "'");
    }
    try {
      var ignored = UUID.fromString(uuid);
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException("Expected valid UUID. Found '" + uuid + "'");
    }
  }

  /**
   * Make a copy of this LogEntry but replace the file path.
   *
   * @param filePath path to use
   */
  public LogEntry switchFile(String filePath) {
    return new LogEntry(filePath);
  }

  @Override
  public String toString() {
    return filePath;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    return other instanceof LogEntry && filePath.equals(((LogEntry) other).filePath);
  }

  @Override
  public int hashCode() {
    return Objects.hash(filePath);
  }

  /** Builds a LogEntry from a metadata table entry; the constructor validates the path. */
  public static LogEntry fromMetaWalEntry(Entry<Key,Value> entry) {
    return new LogEntry(entry.getValue().toString());
  }

  /** Returns the UUID file-name component, i.e. everything after the last slash. */
  public String getUniqueID() {
    return filePath.substring(filePath.lastIndexOf('/') + 1);
  }

  public Text getColumnQualifier() {
    return new Text("-/" + filePath);
  }

  public Value getValue() {
    return new Value(filePath);
  }
}
| 9,892 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/bloomfilter/DynamicBloomFilter.java | /*
* Copyright (c) 2005, European Commission project OneLab under contract 034819
* (http://www.one-lab.org)
* All rights reserved.
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
* - Neither the name of the University Catholique de Louvain - UCL
* nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.accumulo.core.bloomfilter;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.util.bloom.Key;
/**
* Implements a <i>dynamic Bloom filter</i>, as defined in the INFOCOM 2006 paper.
* <p>
* A dynamic Bloom filter (DBF) makes use of a <code>s * m</code> bit matrix but each of the
* <code>s</code> rows is a standard Bloom filter. The creation process of a DBF is iterative. At
* the start, the DBF is a <code>1 * m</code> bit matrix, i.e., it is composed of a single standard
* Bloom filter. It assumes that <code>n<sub>r</sub></code> elements are recorded in the initial bit
* vector, where <code>n<sub>r</sub> <= n</code> (<code>n</code> is the cardinality of the set
* <code>A</code> to record in the filter).
* <p>
* As the size of <code>A</code> grows during the execution of the application, several keys must be
* inserted in the DBF. When inserting a key into the DBF, one must first get an active Bloom filter
* in the matrix. A Bloom filter is active when the number of recorded keys,
* <code>n<sub>r</sub></code>, is strictly less than the current cardinality of <code>A</code>,
* <code>n</code>. If an active Bloom filter is found, the key is inserted and
* <code>n<sub>r</sub></code> is incremented by one. On the other hand, if there is no active Bloom
* filter, a new one is created (i.e., a new row is added to the matrix) according to the current
* size of <code>A</code> and the element is added in this new Bloom filter and the
* <code>n<sub>r</sub></code> value of this new Bloom filter is set to one. A given key is said to
* belong to the DBF if the <code>k</code> positions are set to one in one of the matrix rows.
* <p>
* Originally created by <a href="http://www.one-lab.org">European Commission One-Lab Project
* 034819</a>.
*
* @see Filter The general behavior of a filter
* @see BloomFilter A Bloom filter
*
* @see <a href=
* "https://www.cse.fau.edu/~jie/research/publications/Publication_files/infocom2006.pdf">Theory
* and Network Applications of Dynamic Bloom Filters</a>
*/
public class DynamicBloomFilter extends Filter {
  /**
   * Threshold for the maximum number of keys to record in a single Bloom filter row.
   */
  private int nr;
  /**
   * The number of keys recorded in the currently active standard Bloom filter row.
   */
  private int currentNbRecord;
  /**
   * The matrix of standard Bloom filters; one row per "generation" of recorded keys.
   */
  private BloomFilter[] matrix;

  /**
   * Zero-args constructor for deserialization via {@link #readFields}.
   */
  public DynamicBloomFilter() {}

  /**
   * Constructor.
   * <p>
   * Builds an empty Dynamic Bloom filter containing a single row.
   *
   * @param vectorSize The number of bits in each row's vector.
   * @param nbHash The number of hash function to consider.
   * @param hashType type of the hashing function (see {@link org.apache.hadoop.util.hash.Hash}).
   * @param nr The threshold for the maximum number of keys to record in a dynamic Bloom filter row.
   */
  public DynamicBloomFilter(final int vectorSize, final int nbHash, final int hashType,
      final int nr) {
    super(vectorSize, nbHash, hashType);
    this.nr = nr;
    this.currentNbRecord = 0;
    matrix = new BloomFilter[] {new BloomFilter(this.vectorSize, this.nbHash, this.hashType)};
  }

  /**
   * Adds a key to the active row, creating a new row first when the active one is full.
   *
   * @return true if the key set at least one previously-unset bit in the row it was added to
   * @throws NullPointerException if key is null
   */
  @Override
  public boolean add(final Key key) {
    if (key == null) {
      throw new NullPointerException("Key can not be null");
    }
    BloomFilter bf = getActiveStandardBF();
    if (bf == null) {
      // active row has reached the nr threshold; start a fresh row
      addRow();
      bf = matrix[matrix.length - 1];
      currentNbRecord = 0;
    }
    boolean added = bf.add(key);
    if (added) {
      currentNbRecord++;
    }
    return added;
  }

  @Override
  public void and(final Filter filter) {
    DynamicBloomFilter dbf = checkCompatibility(filter, "and-ed");
    for (int i = 0; i < matrix.length; i++) {
      matrix[i].and(dbf.matrix[i]);
    }
  }

  /**
   * Tests membership across all rows; a key is considered present if any row reports it.
   *
   * <p>
   * NOTE(review): a null key returns true here (legacy behavior retained, unlike
   * {@code BloomFilter.membershipTest} which throws); confirm callers rely on this.
   */
  @Override
  public boolean membershipTest(final Key key) {
    if (key == null) {
      return true;
    }
    for (BloomFilter bloomFilter : matrix) {
      if (bloomFilter.membershipTest(key)) {
        return true;
      }
    }
    return false;
  }

  @Override
  public void not() {
    for (BloomFilter bloomFilter : matrix) {
      bloomFilter.not();
    }
  }

  @Override
  public void or(final Filter filter) {
    DynamicBloomFilter dbf = checkCompatibility(filter, "or-ed");
    for (int i = 0; i < matrix.length; i++) {
      matrix[i].or(dbf.matrix[i]);
    }
  }

  @Override
  public void xor(final Filter filter) {
    DynamicBloomFilter dbf = checkCompatibility(filter, "xor-ed");
    for (int i = 0; i < matrix.length; i++) {
      matrix[i].xor(dbf.matrix[i]);
    }
  }

  @Override
  public String toString() {
    StringBuilder res = new StringBuilder();
    for (BloomFilter bloomFilter : matrix) {
      res.append(bloomFilter);
      // Bug fix: Character.LINE_SEPARATOR is a byte general-category constant (value 13), so
      // appending it resolved to StringBuilder.append(int) and emitted the digits "13" instead
      // of a line break. Append a real newline instead.
      res.append('\n');
    }
    return res.toString();
  }

  // Writable

  @Override
  public void write(final DataOutput out) throws IOException {
    super.write(out);
    out.writeInt(nr);
    out.writeInt(currentNbRecord);
    out.writeInt(matrix.length);
    for (BloomFilter bloomFilter : matrix) {
      bloomFilter.write(out);
    }
  }

  @Override
  public void readFields(final DataInput in) throws IOException {
    super.readFields(in);
    nr = in.readInt();
    currentNbRecord = in.readInt();
    int len = in.readInt();
    matrix = new BloomFilter[len];
    for (int i = 0; i < matrix.length; i++) {
      matrix[i] = new BloomFilter();
      matrix[i].readFields(in);
    }
  }

  /**
   * Verifies that {@code filter} can be combined with <i>this</i> filter and returns it downcast.
   *
   * @param filter the other filter
   * @param op operation name used in the error message, e.g. "and-ed"
   * @throws IllegalArgumentException if the filters are not structurally compatible
   */
  private DynamicBloomFilter checkCompatibility(final Filter filter, final String op) {
    // instanceof is false for null, so no separate null check is needed
    if (!(filter instanceof DynamicBloomFilter) || filter.vectorSize != this.vectorSize
        || filter.nbHash != this.nbHash) {
      throw new IllegalArgumentException("filters cannot be " + op);
    }
    DynamicBloomFilter dbf = (DynamicBloomFilter) filter;
    if (dbf.matrix.length != this.matrix.length || dbf.nr != this.nr) {
      throw new IllegalArgumentException("filters cannot be " + op);
    }
    return dbf;
  }

  /**
   * Appends a new empty row to <i>this</i> dynamic Bloom filter's matrix.
   */
  private void addRow() {
    BloomFilter[] tmp = new BloomFilter[matrix.length + 1];
    System.arraycopy(matrix, 0, tmp, 0, matrix.length);
    tmp[tmp.length - 1] = new BloomFilter(vectorSize, nbHash, hashType);
    matrix = tmp;
  }

  /**
   * Returns the active standard Bloom filter row, or {@code null} when it already holds
   * {@code nr} keys.
   */
  private BloomFilter getActiveStandardBF() {
    if (currentNbRecord >= nr) {
      return null;
    }
    return matrix[matrix.length - 1];
  }
}
| 9,893 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/bloomfilter/BloomFilter.java | /*
* Copyright (c) 2005, European Commission project OneLab under contract 034819
* (http://www.one-lab.org)
* All rights reserved.
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
* - Neither the name of the University Catholique de Louvain - UCL
* nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.accumulo.core.bloomfilter;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.BitSet;
import org.apache.hadoop.util.bloom.Key;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
* Implements a <i>Bloom filter</i>, as defined by Bloom in 1970.
* <p>
* The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by the
* networking research community in the past decade thanks to the bandwidth efficiencies that it
* offers for the transmission of set membership information between networked hosts. A sender
* encodes the information into a bit vector, the Bloom filter, that is more compact than a
* conventional representation. Computation and space costs for construction are linear in the
* number of elements. The receiver uses the filter to test whether various elements are members of
* the set. Though the filter will occasionally return a false positive, it will never return a
* false negative. When creating the filter, the sender can choose its desired point in a trade-off
* between the false positive rate and the size.
*
* <p>
* Originally created by <a href="http://www.one-lab.org">European Commission One-Lab Project
* 034819</a>.
*
* @see Filter The general behavior of a filter
*
* @see <a href="https://portal.acm.org/citation.cfm?id=362692&dl=ACM&coll=portal">Space/Time
* Trade-Offs in Hash Coding with Allowable Errors</a>
*/
public class BloomFilter extends Filter {
  private static final Logger log = LoggerFactory.getLogger(BloomFilter.class);

  // per-bit masks within a byte, used only when reading the old serialization format
  private static final byte[] bitvalues = {(byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08,
      (byte) 0x10, (byte) 0x20, (byte) 0x40, (byte) 0x80};

  /** The bit vector. */
  BitSet bits;

  /** Default constructor - use with readFields */
  public BloomFilter() {}

  /**
   * Constructor
   *
   * @param vectorSize The vector size of <i>this</i> filter.
   * @param nbHash The number of hash function to consider.
   * @param hashType type of the hashing function (see {@link org.apache.hadoop.util.hash.Hash}).
   */
  public BloomFilter(final int vectorSize, final int nbHash, final int hashType) {
    super(vectorSize, nbHash, hashType);
    bits = new BitSet(this.vectorSize);
  }

  /**
   * Sets the bits at the key's hash positions.
   *
   * @return true if at least one bit was newly set, i.e. the key was not already fully present
   * @throws NullPointerException if key is null
   */
  @Override
  public boolean add(final Key key) {
    if (key == null) {
      throw new NullPointerException("key cannot be null");
    }
    int[] h = hash.hash(key);
    hash.clear();
    boolean bitsSet = false;
    for (int i = 0; i < nbHash; i++) {
      bitsSet |= !bits.get(h[i]);
      bits.set(h[i]);
    }
    return bitsSet;
  }

  @Override
  public void and(final Filter filter) {
    if (filter == null || !(filter instanceof BloomFilter) || filter.vectorSize != this.vectorSize
        || filter.nbHash != this.nbHash) {
      throw new IllegalArgumentException("filters cannot be and-ed");
    }
    this.bits.and(((BloomFilter) filter).bits);
  }

  /**
   * Tests whether all of the key's hash positions are set. May report false positives, never
   * false negatives.
   *
   * @throws NullPointerException if key is null
   */
  @Override
  public boolean membershipTest(final Key key) {
    if (key == null) {
      throw new NullPointerException("key cannot be null");
    }
    int[] h = hash.hash(key);
    hash.clear();
    for (int i = 0; i < nbHash; i++) {
      if (!bits.get(h[i])) {
        return false;
      }
    }
    return true;
  }

  /**
   * Flips every bit of the vector.
   *
   * <p>
   * Bug fix: {@link BitSet#flip(int, int)} treats its second argument as an exclusive upper
   * bound, so the previous {@code flip(0, vectorSize - 1)} always left the highest bit
   * unflipped.
   */
  @Override
  public void not() {
    bits.flip(0, vectorSize);
  }

  @Override
  public void or(final Filter filter) {
    if (filter == null || !(filter instanceof BloomFilter) || filter.vectorSize != this.vectorSize
        || filter.nbHash != this.nbHash) {
      throw new IllegalArgumentException("filters cannot be or-ed");
    }
    bits.or(((BloomFilter) filter).bits);
  }

  @Override
  public void xor(final Filter filter) {
    if (filter == null || !(filter instanceof BloomFilter) || filter.vectorSize != this.vectorSize
        || filter.nbHash != this.nbHash) {
      throw new IllegalArgumentException("filters cannot be xor-ed");
    }
    bits.xor(((BloomFilter) filter).bits);
  }

  @Override
  public String toString() {
    return bits.toString();
  }

  /**
   * @return size of the the bloomfilter
   */
  public int getVectorSize() {
    return this.vectorSize;
  }

  // Writable

  @Override
  public void write(final DataOutput out) throws IOException {
    super.write(out);
    ByteArrayOutputStream boas = new ByteArrayOutputStream();
    // try-with-resources guarantees the stream is flushed and closed even if writeObject fails
    try (ObjectOutputStream oos = new ObjectOutputStream(boas)) {
      oos.writeObject(bits);
    }
    out.write(boas.toByteArray());
  }

  @SuppressFBWarnings(value = {"OS_OPEN_STREAM", "OBJECT_DESERIALIZATION"},
      justification = "Caller is responsible for closing input stream supplied as a parameter; "
          + "BitSet deserialization is unsafe, but can't update it until RFile version change")
  @Override
  public void readFields(final DataInput in) throws IOException {
    super.readFields(in);
    bits = new BitSet(this.vectorSize);
    byte[] bytes = null;
    if (super.getSerialVersion() != super.getVersion()) {
      // old format: the raw bit bytes follow directly
      bytes = new byte[getNBytes()];
      in.readFully(bytes);
    }
    if (super.getSerialVersion() == super.getVersion()) {
      // current format: the BitSet was Java-serialized; do not close ois, it would close 'in'
      ObjectInputStream ois = new ObjectInputStream((DataInputStream) in);
      try {
        bits = (BitSet) ois.readObject();
      } catch (ClassNotFoundException e) {
        log.error("BloomFilter tried to deserialize as bitset", e);
        // preserve the cause instead of flattening it into the message
        throw new IOException("BloomFilter tried to deserialize as bitset", e);
      }
    } else {
      // old format: expand the packed bytes into the BitSet, one bit at a time
      for (int i = 0, byteIndex = 0, bitIndex = 0; i < vectorSize; i++, bitIndex++) {
        if (bitIndex == 8) {
          bitIndex = 0;
          byteIndex++;
        }
        if ((bytes[byteIndex] & bitvalues[bitIndex]) != 0) {
          bits.set(i);
        }
      }
    }
  }

  /* @return number of bytes needed to hold bit vector */
  private int getNBytes() {
    return (vectorSize + 7) / 8;
  }
}// end class
| 9,894 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/bloomfilter/Filter.java | /*
* Copyright (c) 2005, European Commission project OneLab under contract 034819
* (http://www.one-lab.org)
* All rights reserved.
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
* - Neither the name of the University Catholique de Louvain - UCL
* nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.accumulo.core.bloomfilter;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.bloom.HashFunction;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;
/**
* Defines the general behavior of a filter.
* <p>
* A filter is a data structure which aims at offering a lossy summary of a set <code>A</code>. The
* key idea is to map entries of <code>A</code> (also called <i>keys</i>) into several positions in
* a vector through the use of several hash functions.
* <p>
* Typically, a filter will be implemented as a Bloom filter (or a Bloom filter extension).
* <p>
* It must be extended in order to define the real behavior.
*
* @see Key The general behavior of a key
* @see HashFunction A hash function
*/
public abstract class Filter implements Writable {
private static final int VERSION = -2; // negative to accommodate for old format
/** The vector size of <i>this</i> filter. */
protected int vectorSize;
private int rVersion;
/** The hash function used to map a key to several positions in the vector. */
protected HashFunction hash;
/** The number of hash function to consider. */
protected int nbHash;
/** Type of hashing function to use. */
protected int hashType;
protected Filter() {}
/**
* Constructor.
*
* @param vectorSize The vector size of <i>this</i> filter.
* @param nbHash The number of hash functions to consider.
* @param hashType type of the hashing function (see {@link Hash}).
*/
protected Filter(final int vectorSize, final int nbHash, final int hashType) {
this.vectorSize = vectorSize;
this.nbHash = nbHash;
this.hashType = hashType;
this.hash = new HashFunction(this.vectorSize, this.nbHash, this.hashType);
}
/**
* Adds a key to <i>this</i> filter.
*
* @param key The key to add.
* @return true if the key was added, false otherwise.
*/
public abstract boolean add(Key key);
/**
* Determines whether a specified key belongs to <i>this</i> filter.
*
* @param key The key to test.
* @return boolean True if the specified key belongs to <i>this</i> filter. False otherwise.
*/
public abstract boolean membershipTest(Key key);
/**
* Performs a logical AND between <i>this</i> filter and a specified filter.
* <p>
* <b>Invariant</b>: The result is assigned to <i>this</i> filter.
*
* @param filter The filter to AND with.
*/
public abstract void and(Filter filter);
/**
* Performs a logical OR between <i>this</i> filter and a specified filter.
* <p>
* <b>Invariant</b>: The result is assigned to <i>this</i> filter.
*
* @param filter The filter to OR with.
*/
public abstract void or(Filter filter);
/**
* Performs a logical XOR between <i>this</i> filter and a specified filter.
* <p>
* <b>Invariant</b>: The result is assigned to <i>this</i> filter.
*
* @param filter The filter to XOR with.
*/
public abstract void xor(Filter filter);
/**
* Performs a logical NOT on <i>this</i> filter.
* <p>
* The result is assigned to <i>this</i> filter.
*/
public abstract void not();
// Writable interface
/**
 * Serializes the filter's sizing parameters. The write order (version, nbHash, hashType,
 * vectorSize) is the wire format consumed by {@code readFields} and must not change.
 */
@Override
public void write(final DataOutput out) throws IOException {
  out.writeInt(VERSION);
  out.writeInt(this.nbHash);
  out.writeByte(this.hashType);
  out.writeInt(this.vectorSize);
}
/**
 * @return the version number read from the serialized form by the last {@code readFields} call
 */
protected int getSerialVersion() {
  return rVersion;
}
/**
 * @return the current serialization version written by {@code write}
 */
protected int getVersion() {
  return VERSION;
}
/**
 * Deserializes the filter's sizing parameters written by {@code write} and rebuilds the hash
 * function. Also records the on-disk version in {@code rVersion} for {@code getSerialVersion}.
 *
 * @throws IOException if the stream reports an unsupported version
 */
@Override
public void readFields(final DataInput in) throws IOException {
  final int ver = in.readInt();
  rVersion = ver;
  if (ver > 0) { // old unversioned format: the first int was nbHash itself
    this.nbHash = ver;
    this.hashType = Hash.JENKINS_HASH;
  } else if (ver == VERSION || ver == VERSION + 1) { // Support for directly serializing the bitset
    this.nbHash = in.readInt();
    this.hashType = in.readByte();
  } else {
    throw new IOException("Unsupported version: " + ver);
  }
  this.vectorSize = in.readInt();
  this.hash = new HashFunction(this.vectorSize, this.nbHash, this.hashType);
}
}// end class
| 9,895 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/ThriftUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.InetAddress;
import java.nio.channels.ClosedByInterruptException;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.rpc.SaslConnectionParams.SaslMechanism;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.thrift.TException;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.protocol.TProtocolFactory;
import org.apache.thrift.transport.TSSLTransportFactory;
import org.apache.thrift.transport.TSaslClientTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.apache.thrift.transport.TTransportFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
/**
 * Factory methods for creating Thrift client objects
 */
public class ThriftUtil {
  private static final Logger log = LoggerFactory.getLogger(ThriftUtil.class);

  private static final TraceProtocolFactory protocolFactory = new TraceProtocolFactory();
  private static final AccumuloTFramedTransportFactory transportFactory =
      new AccumuloTFramedTransportFactory(Integer.MAX_VALUE);
  // Cache of framed transport factories keyed by max frame size; access is guarded by
  // synchronization on this class in transportFactory(long).
  private static final Map<Integer,TTransportFactory> factoryCache = new HashMap<>();

  public static final String GSSAPI = "GSSAPI", DIGEST_MD5 = "DIGEST-MD5";

  // Upper bound (exclusive, ms) on the random backoff used to dodge Kerberos replay protection
  private static final int RELOGIN_MAX_BACKOFF = 5000;

  /**
   * An instance of {@link TraceProtocolFactory}
   *
   * @return The default Thrift TProtocolFactory for RPC
   */
  public static TProtocolFactory protocolFactory() {
    return protocolFactory;
  }

  /**
   * An instance of {@link org.apache.thrift.transport.layered.TFramedTransport.Factory}
   *
   * @return The default Thrift TTransportFactory for RPC
   */
  public static TTransportFactory transportFactory() {
    return transportFactory;
  }

  /**
   * Create a Thrift client using the given factory and transport
   */
  public static <T extends TServiceClient> T createClient(ThriftClientTypes<T> type,
      TTransport transport) {
    return type.getClient(protocolFactory.getProtocol(transport));
  }

  /**
   * Create a Thrift client using the given factory with a pooled transport (if available), the
   * address, and client context with no timeout.
   *
   * @param type Thrift client type
   * @param address Server address for client to connect to
   * @param context RPC options
   */
  public static <T extends TServiceClient> T getClientNoTimeout(ThriftClientTypes<T> type,
      HostAndPort address, ClientContext context) throws TTransportException {
    return getClient(type, address, context, 0);
  }

  /**
   * Create a Thrift client using the given factory with a pooled transport (if available), the
   * address and client context. Client timeout is extracted from the ClientContext
   *
   * @param type Thrift client type
   * @param address Server address for client to connect to
   * @param context RPC options
   */
  public static <T extends TServiceClient> T getClient(ThriftClientTypes<T> type,
      HostAndPort address, ClientContext context) throws TTransportException {
    TTransport transport = context.getTransportPool().getTransport(address,
        context.getClientTimeoutInMillis(), context);
    return createClient(type, transport);
  }

  /**
   * Create a Thrift client using the given factory with a pooled transport (if available) using the
   * address, client context and timeout
   *
   * @param type Thrift client type
   * @param address Server address for client to connect to
   * @param context RPC options
   * @param timeout Socket timeout which overrides the ClientContext timeout
   */
  public static <T extends TServiceClient> T getClient(ThriftClientTypes<T> type,
      HostAndPort address, ClientContext context, long timeout) throws TTransportException {
    TTransport transport = context.getTransportPool().getTransport(address, timeout, context);
    return createClient(type, transport);
  }

  /**
   * Return the client's transport to the shared pool, logging (with a stack trace) if the client
   * or its transport is null.
   */
  public static void close(TServiceClient client, ClientContext context) {
    if (client != null && client.getInputProtocol() != null
        && client.getInputProtocol().getTransport() != null) {
      context.getTransportPool().returnTransport(client.getInputProtocol().getTransport());
    } else {
      // the Exception is created solely to capture the caller's stack trace in the debug log
      log.debug("Attempt to close null connection to a server", new Exception());
    }
  }

  /**
   * Return the transport used by the client to the shared pool.
   *
   * @param iface The Client being returned or null.
   */
  public static void returnClient(TServiceClient iface, ClientContext context) {
    if (iface != null) {
      context.getTransportPool().returnTransport(iface.getInputProtocol().getTransport());
    }
  }

  /**
   * Create a transport that is not pooled
   *
   * @param address Server address to open the transport to
   * @param context RPC options
   */
  public static TTransport createTransport(HostAndPort address, ClientContext context)
      throws TException {
    return createClientTransport(address, (int) context.getClientTimeoutInMillis(),
        context.getClientSslParams(), context.getSaslParams());
  }

  /**
   * Get an instance of the TTransportFactory with the provided maximum frame size
   *
   * @param maxFrameSize Maximum Thrift message frame size
   * @return A, possibly cached, TTransportFactory with the requested maximum frame size
   * @throws IllegalArgumentException if the requested size is not in [1, Integer.MAX_VALUE]
   */
  public static synchronized TTransportFactory transportFactory(long maxFrameSize) {
    if (maxFrameSize > Integer.MAX_VALUE || maxFrameSize < 1) {
      throw new IllegalArgumentException(
          "Thrift transport frames are limited to " + Integer.MAX_VALUE + ", got " + maxFrameSize);
    }
    return factoryCache.computeIfAbsent((int) maxFrameSize, AccumuloTFramedTransportFactory::new);
  }

  /**
   * Create a TTransport for clients to the given address with the provided socket timeout and
   * session-layer configuration
   *
   * @param address Server address to connect to
   * @param timeout Client socket timeout
   * @param sslParams RPC options for SSL servers
   * @param saslParams RPC options for SASL servers
   * @return An open TTransport which must be closed when finished
   */
  public static TTransport createClientTransport(HostAndPort address, int timeout,
      SslConnectionParams sslParams, SaslConnectionParams saslParams) throws TTransportException {
    boolean success = false;
    TTransport transport = null;
    try {
      if (sslParams != null) {
        // The check in AccumuloServerContext ensures that servers are brought up with sane
        // configurations, but we also want to validate clients
        if (saslParams != null) {
          throw new IllegalStateException("Cannot use both SSL and SASL");
        }

        log.trace("Creating SSL client transport");

        // TSSLTransportFactory handles timeout 0 -> forever natively
        if (sslParams.useJsse()) {
          transport =
              TSSLTransportFactory.getClientSocket(address.getHost(), address.getPort(), timeout);
        } else {
          transport = TSSLTransportFactory.getClientSocket(address.getHost(), address.getPort(),
              timeout, sslParams.getTSSLTransportParameters());
        }

        // TSSLTransportFactory leaves transports open, so no need to open here
        transport = ThriftUtil.transportFactory().getTransport(transport);
      } else if (saslParams != null) {
        if (!UserGroupInformation.isSecurityEnabled()) {
          throw new IllegalStateException(
              "Expected Kerberos security to be enabled if SASL is in use");
        }

        log.trace("Creating SASL connection to {}:{}", address.getHost(), address.getPort());

        // Make sure a timeout is set
        try {
          transport = TTimeoutTransport.create(address, timeout);
        } catch (TTransportException e) {
          log.warn("Failed to open transport to {}", address);
          throw e;
        }

        try {
          // Log in via UGI, ensures we have logged in with our KRB credentials
          final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
          final UserGroupInformation userForRpc;
          if (currentUser.getAuthenticationMethod() == AuthenticationMethod.PROXY) {
            // A "proxy" user is when the real (Kerberos) credentials are for a user
            // other than the one we're acting as. When we make an RPC though, we need to make sure
            // that the current user is the user that has some credentials.
            if (currentUser.getRealUser() != null) {
              userForRpc = currentUser.getRealUser();
              log.trace("{} is a proxy user, using real user instead {}", currentUser, userForRpc);
            } else {
              // The current user has no credentials, let it fail naturally at the RPC layer (no
              // ticket)
              // We know this won't work, but we can't do anything else
              log.warn("The current user is a proxy user but there is no"
                  + " underlying real user (likely that RPCs will fail): {}", currentUser);
              userForRpc = currentUser;
            }
          } else {
            // The normal case: the current user has its own ticket
            userForRpc = currentUser;
          }

          // Is this pricey enough that we want to cache it?
          final String hostname = InetAddress.getByName(address.getHost()).getCanonicalHostName();

          final SaslMechanism mechanism = saslParams.getMechanism();

          log.trace("Opening transport to server as {} to {}/{} using {}", userForRpc,
              saslParams.getKerberosServerPrimary(), hostname, mechanism);

          // Create the client SASL transport using the information for the server
          // Despite the 'protocol' argument seeming to be useless, it *must* be the primary of the
          // server being connected to
          transport = new TSaslClientTransport(mechanism.getMechanismName(), null,
              saslParams.getKerberosServerPrimary(), hostname, saslParams.getSaslProperties(),
              saslParams.getCallbackHandler(), transport);

          // Wrap it all in a processor which will run with a doAs the current user
          transport = new UGIAssumingTransport(transport, userForRpc);

          // Open the transport
          transport.open();
        } catch (TTransportException e) {
          log.warn("Failed to open SASL transport", e);

          // We might have had a valid ticket, but it expired. We'll let the caller retry, but we
          // will attempt to re-login to make the next attempt work.
          // Sadly, we have no way to determine the actual reason we got this TTransportException
          // other than inspecting the exception msg.
          log.debug("Caught TTransportException opening SASL transport,"
              + " checking if re-login is necessary before propagating the exception.");
          attemptClientReLogin();

          throw e;
        } catch (IOException e) {
          log.warn("Failed to open SASL transport", e);
          ThriftUtil.checkIOExceptionCause(e);
          throw new TTransportException(e);
        }
      } else {
        log.trace("Opening normal transport");
        if (timeout == 0) {
          transport = new TSocket(address.getHost(), address.getPort());
          transport.open();
        } else {
          try {
            transport = TTimeoutTransport.create(address, timeout);
          } catch (TTransportException ex) {
            log.warn("Failed to open transport to {}", address);
            throw ex;
          }

          // Open the transport
          transport.open();
        }
        transport = ThriftUtil.transportFactory().getTransport(transport);
      }
      success = true;
      return transport;
    } finally {
      // avoid leaking a partially-constructed transport on any failure path
      if (!success && transport != null) {
        transport.close();
      }
    }
  }

  /**
   * Some wonderful snippets of documentation from HBase on performing the re-login client-side (as
   * well as server-side) in the following paragraph. We want to attempt a re-login to automatically
   * refresh the client's Krb "credentials" (remember, a server might also be a client, manager
   * sending RPC to tserver), but we have to take care to avoid Kerberos' replay attack protection.
   * <p>
   * If multiple clients with the same principal try to connect to the same server at the same time,
   * the server assumes a replay attack is in progress. This is a feature of kerberos. In order to
   * work around this, what is done is that the client backs off randomly and tries to initiate the
   * connection again. The other problem is to do with ticket expiry. To handle that, a relogin is
   * attempted.
   */
  private static void attemptClientReLogin() {
    try {
      UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
      if (loginUser == null || !loginUser.hasKerberosCredentials()) {
        // We should have already checked that we're logged in and have credentials. A
        // precondition-like check.
        throw new IllegalStateException("Expected to find Kerberos UGI credentials, but did not");
      }
      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
      // A Proxy user is the "effective user" (in name only), riding on top of the "real user"'s Krb
      // credentials.
      UserGroupInformation realUser = currentUser.getRealUser();

      // re-login only in case it is the login user or superuser.
      if (loginUser.equals(currentUser) || loginUser.equals(realUser)) {
        if (UserGroupInformation.isLoginKeytabBased()) {
          log.info("Performing keytab-based Kerberos re-login");
          loginUser.reloginFromKeytab();
        } else {
          log.info("Performing ticket-cache-based Kerberos re-login");
          loginUser.reloginFromTicketCache();
        }

        // Avoid the replay attack protection, sleep 1 to 5000ms
        try {
          Thread.sleep(RANDOM.get().nextInt(RELOGIN_MAX_BACKOFF) + 1);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
      } else {
        log.debug("Not attempting Kerberos re-login: loginUser={}, currentUser={}, realUser={}",
            loginUser, currentUser, realUser);
      }
    } catch (IOException e) {
      // The inability to check is worrisome and deserves a RuntimeException instead of a propagated
      // IO-like Exception.
      log.warn("Failed to check (and/or perform) Kerberos client re-login", e);
      throw new UncheckedIOException(e);
    }
  }

  /**
   * If the IOException was caused by a thread interrupt (ClosedByInterruptException), restore the
   * interrupt status and rethrow unchecked; otherwise do nothing.
   */
  public static void checkIOExceptionCause(IOException e) {
    if (e instanceof ClosedByInterruptException) {
      Thread.currentThread().interrupt();
      throw new UncheckedIOException(e);
    }
  }
}
| 9,896 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/SslConnectionParams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import java.io.File;
import java.io.FileNotFoundException;
import java.net.URL;
import java.util.Arrays;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class SslConnectionParams {
  private static final Logger log = LoggerFactory.getLogger(SslConnectionParams.class);

  private boolean useJsse = false;
  private boolean clientAuth = false;

  private boolean keyStoreSet;
  private String keyStorePath;
  private String keyStorePass;
  private String keyStoreType;

  private boolean trustStoreSet;
  private String trustStorePath;
  private String trustStorePass;
  private String trustStoreType;

  private String[] cipherSuites;
  private String[] serverProtocols;
  private String clientProtocol;

  // Use the static construction methods
  private SslConnectionParams() {}

  /**
   * Build SSL parameters from the given configuration, or return null if RPC SSL is disabled.
   *
   * @param conf the configuration to read SSL properties from
   * @param server true when configuring the server side (loads the keystore; the truststore only
   *        if client auth is required), false for the client side (the reverse)
   */
  public static SslConnectionParams forConfig(AccumuloConfiguration conf, boolean server) {
    if (!conf.getBoolean(Property.INSTANCE_RPC_SSL_ENABLED)) {
      return null;
    }

    SslConnectionParams result = new SslConnectionParams();
    boolean requireClientAuth = conf.getBoolean(Property.INSTANCE_RPC_SSL_CLIENT_AUTH);
    if (server) {
      result.setClientAuth(requireClientAuth);
    }
    if (conf.getBoolean(Property.RPC_USE_JSSE)) {
      // JSSE mode delegates all store/cipher configuration to the JVM's system properties
      result.setUseJsse(true);
      return result;
    }

    try {
      if (!server || requireClientAuth) {
        result.setTrustStoreFromConf(conf);
      }
      if (server || requireClientAuth) {
        result.setKeyStoreFromConf(conf);
      }
    } catch (FileNotFoundException e) {
      throw new IllegalArgumentException("Could not load configured keystore file", e);
    }

    String ciphers = conf.get(Property.RPC_SSL_CIPHER_SUITES);
    if (ciphers != null && !ciphers.isEmpty()) {
      result.cipherSuites = ciphers.split(",");
    }

    String enabledProtocols = conf.get(Property.RPC_SSL_ENABLED_PROTOCOLS);
    result.serverProtocols = enabledProtocols.split(",");

    result.clientProtocol = conf.get(Property.RPC_SSL_CLIENT_PROTOCOL);

    return result;
  }

  /**
   * Resolve a store password: the override property wins when set, else the given default.
   */
  private static String passwordFromConf(AccumuloConfiguration conf, String defaultPassword,
      Property passwordOverrideProperty) {
    String keystorePassword = conf.get(passwordOverrideProperty);
    if (keystorePassword.isEmpty()) {
      keystorePassword = defaultPassword;
    } else {
      if (log.isTraceEnabled()) {
        log.trace("Using explicit SSL private key password from {}",
            passwordOverrideProperty.getKey());
      }
    }
    return keystorePassword;
  }

  /**
   * Resolve a configured store path to an existing file (filesystem or classpath).
   */
  private static String storePathFromConf(AccumuloConfiguration conf, Property pathProperty)
      throws FileNotFoundException {
    return findKeystore(conf.getPath(pathProperty));
  }

  /**
   * Load keystore settings from configuration; the keystore password defaults to the instance
   * secret when no explicit password property is set.
   */
  public void setKeyStoreFromConf(AccumuloConfiguration conf) throws FileNotFoundException {
    keyStoreSet = true;
    keyStorePath = storePathFromConf(conf, Property.RPC_SSL_KEYSTORE_PATH);
    keyStorePass = passwordFromConf(conf, conf.get(Property.INSTANCE_SECRET),
        Property.RPC_SSL_KEYSTORE_PASSWORD);
    keyStoreType = conf.get(Property.RPC_SSL_KEYSTORE_TYPE);
  }

  /**
   * Load truststore settings from configuration; the truststore password defaults to empty.
   */
  public void setTrustStoreFromConf(AccumuloConfiguration conf) throws FileNotFoundException {
    trustStoreSet = true;
    trustStorePath = storePathFromConf(conf, Property.RPC_SSL_TRUSTSTORE_PATH);
    trustStorePass = passwordFromConf(conf, "", Property.RPC_SSL_TRUSTSTORE_PASSWORD);
    trustStoreType = conf.get(Property.RPC_SSL_TRUSTSTORE_TYPE);
  }

  /**
   * @return server-side SSL parameters, or null if RPC SSL is disabled
   */
  public static SslConnectionParams forServer(AccumuloConfiguration configuration) {
    return forConfig(configuration, true);
  }

  /**
   * @return client-side SSL parameters, or null if RPC SSL is disabled
   */
  public static SslConnectionParams forClient(AccumuloConfiguration configuration) {
    return forConfig(configuration, false);
  }

  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "code runs in same security context as user who providing the keystore file")
  private static String findKeystore(String keystorePath) throws FileNotFoundException {
    try {
      // first just try the file
      File file = new File(keystorePath);
      if (file.exists()) {
        return file.getAbsolutePath();
      }
      if (!file.isAbsolute()) {
        // try classpath
        URL url = SslConnectionParams.class.getClassLoader().getResource(keystorePath);
        if (url != null) {
          file = new File(url.toURI());
          if (file.exists()) {
            return file.getAbsolutePath();
          }
        }
      }
    } catch (Exception e) {
      log.warn("Exception finding keystore", e);
    }
    throw new FileNotFoundException("Failed to load SSL keystore from " + keystorePath);
  }

  public void setUseJsse(boolean useJsse) {
    this.useJsse = useJsse;
  }

  public boolean useJsse() {
    return useJsse;
  }

  public void setClientAuth(boolean clientAuth) {
    this.clientAuth = clientAuth;
  }

  public boolean isClientAuth() {
    return clientAuth;
  }

  public String[] getServerProtocols() {
    return serverProtocols;
  }

  public String getClientProtocol() {
    return clientProtocol;
  }

  public boolean isKeyStoreSet() {
    return keyStoreSet;
  }

  public String getKeyStorePath() {
    return keyStorePath;
  }

  /**
   * @return the keyStorePass
   */
  public String getKeyStorePass() {
    return keyStorePass;
  }

  public String getKeyStoreType() {
    return keyStoreType;
  }

  public boolean isTrustStoreSet() {
    return trustStoreSet;
  }

  public String getTrustStorePath() {
    return trustStorePath;
  }

  public String getTrustStorePass() {
    return trustStorePass;
  }

  /**
   * @return the trustStoreType
   */
  public String getTrustStoreType() {
    return trustStoreType;
  }

  /**
   * @return Thrift SSL parameters built from these settings
   * @throws IllegalStateException when JSSE mode is configured (Thrift parameters do not apply)
   */
  public TSSLTransportParameters getTSSLTransportParameters() {
    if (useJsse) {
      throw new IllegalStateException("Cannot get TSSLTransportParameters for JSSE configuration.");
    }

    TSSLTransportParameters params = new TSSLTransportParameters(clientProtocol, cipherSuites);
    params.requireClientAuth(clientAuth);
    if (keyStoreSet) {
      params.setKeyStore(keyStorePath, keyStorePass, null, keyStoreType);
    }
    if (trustStoreSet) {
      params.setTrustStore(trustStorePath, trustStorePass, null, trustStoreType);
    }
    return params;
  }

  @Override
  public int hashCode() {
    int hash = 0;
    hash = 31 * hash + (clientAuth ? 0 : 1);
    hash = 31 * hash + (useJsse ? 0 : 1);
    if (useJsse) {
      // in JSSE mode only clientAuth/useJsse are significant (consistent with equals)
      return hash;
    }
    hash = 31 * hash + (keyStoreSet ? 0 : 1);
    hash = 31 * hash + (trustStoreSet ? 0 : 1);
    if (keyStoreSet) {
      hash = 31 * hash + keyStorePath.hashCode();
    }
    if (trustStoreSet) {
      hash = 31 * hash + trustStorePath.hashCode();
    }
    hash = 31 * hash + clientProtocol.hashCode();
    hash = 31 * hash + Arrays.hashCode(serverProtocols);
    return hash;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof SslConnectionParams)) {
      return false;
    }

    SslConnectionParams other = (SslConnectionParams) obj;
    // compare the mode flags on both sides first so the relation stays symmetric; the previous
    // implementation only consulted this object's flags, so a.equals(b) could differ from
    // b.equals(a) when useJsse or the *StoreSet flags disagreed
    if (clientAuth != other.clientAuth || useJsse != other.useJsse) {
      return false;
    }
    if (useJsse) {
      return true;
    }
    if (keyStoreSet != other.keyStoreSet || trustStoreSet != other.trustStoreSet) {
      return false;
    }
    if (keyStoreSet
        && (!keyStorePath.equals(other.keyStorePath) || !keyStorePass.equals(other.keyStorePass)
            || !keyStoreType.equals(other.keyStoreType))) {
      return false;
    }
    if (trustStoreSet && (!trustStorePath.equals(other.trustStorePath)
        || !trustStorePass.equals(other.trustStorePass)
        || !trustStoreType.equals(other.trustStoreType))) {
      return false;
    }
    if (!Arrays.equals(serverProtocols, other.serverProtocols)) {
      return false;
    }
    return clientProtocol.equals(other.clientProtocol);
  }
}
| 9,897 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/TBufferedSocket.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import org.apache.thrift.transport.TIOStreamTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransportException;
public class TBufferedSocket extends TIOStreamTransport {

  // remote endpoint rendered as "host:port", captured once at construction for diagnostics
  String client;

  /**
   * Wraps the given socket's streams with buffered streams of the requested size and records the
   * remote endpoint for {@link #getClientString()}.
   *
   * @param sock an already-connected Thrift socket
   * @param bufferSize buffer size in bytes used for both the input and output streams
   */
  public TBufferedSocket(TSocket sock, int bufferSize) throws IOException, TTransportException {
    super(new BufferedInputStream(sock.getSocket().getInputStream(), bufferSize),
        new BufferedOutputStream(sock.getSocket().getOutputStream(), bufferSize));
    client = String.format("%s:%d", sock.getSocket().getInetAddress().getHostAddress(),
        sock.getSocket().getPort());
  }

  /**
   * @return the remote endpoint of this socket as {@code "host:port"}
   */
  public String getClientString() {
    return client;
  }
}
| 9,898 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/rpc/FilterTransport.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.rpc;
import static java.util.Objects.requireNonNull;
import org.apache.thrift.TConfiguration;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
/**
 * Transport that simply wraps another transport. This is the equivalent of FilterInputStream for
 * Thrift transports: every operation is forwarded, unchanged, to the underlying transport.
 */
public class FilterTransport extends TTransport {
  // the underlying transport every call is forwarded to; never null
  private final TTransport delegate;

  /**
   * @param wrapped the transport to forward all operations to; must not be null
   */
  public FilterTransport(TTransport wrapped) {
    this.delegate = requireNonNull(wrapped);
  }

  /**
   * @return the underlying transport, for use by subclasses
   */
  protected TTransport getWrapped() {
    return delegate;
  }

  @Override
  public void open() throws TTransportException {
    delegate.open();
  }

  @Override
  public boolean isOpen() {
    return delegate.isOpen();
  }

  @Override
  public boolean peek() {
    return delegate.peek();
  }

  @Override
  public void close() {
    delegate.close();
  }

  @Override
  public int read(byte[] buf, int off, int len) throws TTransportException {
    return delegate.read(buf, off, len);
  }

  @Override
  public int readAll(byte[] buf, int off, int len) throws TTransportException {
    return delegate.readAll(buf, off, len);
  }

  @Override
  public void write(byte[] buf) throws TTransportException {
    delegate.write(buf);
  }

  @Override
  public void write(byte[] buf, int off, int len) throws TTransportException {
    delegate.write(buf, off, len);
  }

  @Override
  public void flush() throws TTransportException {
    delegate.flush();
  }

  @Override
  public byte[] getBuffer() {
    return delegate.getBuffer();
  }

  @Override
  public int getBufferPosition() {
    return delegate.getBufferPosition();
  }

  @Override
  public int getBytesRemainingInBuffer() {
    return delegate.getBytesRemainingInBuffer();
  }

  @Override
  public void consumeBuffer(int len) {
    delegate.consumeBuffer(len);
  }

  @Override
  public TConfiguration getConfiguration() {
    return delegate.getConfiguration();
  }

  @Override
  public void updateKnownMessageSize(long size) throws TTransportException {
    delegate.updateKnownMessageSize(size);
  }

  @Override
  public void checkReadBytesAvailable(long numBytes) throws TTransportException {
    delegate.checkReadBytesAvailable(numBytes);
  }
}
| 9,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.