index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Version.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util; import org.apache.accumulo.start.Main; import org.apache.accumulo.start.spi.KeywordExecutable; import com.google.auto.service.AutoService; @AutoService(KeywordExecutable.class) public class Version implements KeywordExecutable { @Override public String keyword() { return "version"; } @Override public UsageGroup usageGroup() { return UsageGroup.CORE; } @Override public String description() { return "Prints Accumulo version"; } @Override public void execute(final String[] args) throws Exception { Class<?> runTMP = Main.getClassLoader().loadClass("org.apache.accumulo.core.Constants"); System.out.println(runTMP.getField("VERSION").get(null)); } }
9,600
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/TextUtil.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util;

import static java.nio.charset.StandardCharsets.UTF_8;

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.accumulo.core.Constants;
import org.apache.hadoop.io.Text;

/**
 * Utility methods for working with Hadoop {@link Text} objects, whose backing byte array may be
 * longer than the logical length reported by {@link Text#getLength()}.
 */
public final class TextUtil {

  // static utility only; prevent instantiation (fix: class previously had an implicit public ctor)
  private TextUtil() {}

  /**
   * Returns a byte array exactly {@code text.getLength()} bytes long. Avoids a copy when the
   * backing array is already exactly sized.
   */
  public static byte[] getBytes(Text text) {
    byte[] bytes = text.getBytes();
    if (bytes.length != text.getLength()) {
      // backing array is over-allocated; trim to the logical length
      bytes = Arrays.copyOf(bytes, text.getLength());
    }
    return bytes;
  }

  /**
   * Wraps the text's backing array in a ByteBuffer limited to the logical length, without copying.
   *
   * @return null when {@code text} is null
   */
  public static ByteBuffer getByteBuffer(Text text) {
    if (text == null) {
      return null;
    }
    byte[] bytes = text.getBytes();
    return ByteBuffer.wrap(bytes, 0, text.getLength());
  }

  /**
   * Truncates {@code text} to at most {@code maxLen} bytes, appending a "... TRUNCATED" marker when
   * truncation occurred. Returns the original object unchanged when it already fits.
   */
  public static Text truncate(Text text, int maxLen) {
    if (text.getLength() > maxLen) {
      Text newText = new Text();
      newText.append(text.getBytes(), 0, maxLen);
      byte[] suffix = "... TRUNCATED".getBytes(UTF_8);
      newText.append(suffix, 0, suffix.length);
      return newText;
    }
    return text;
  }

  /** Truncates to the default display limit {@link Constants#MAX_DATA_TO_PRINT}. */
  public static Text truncate(Text row) {
    return truncate(row, Constants.MAX_DATA_TO_PRINT);
  }
}
9,601
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util;

import java.io.Console;

import org.apache.accumulo.core.cli.ClientOpts.PasswordConverter;
import org.apache.accumulo.core.cli.Help;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.start.spi.KeywordExecutable;

import com.beust.jcommander.Parameter;
import com.google.auto.service.AutoService;

/**
 * Keyword executable for {@code accumulo create-token}: interactively builds an
 * {@link AuthenticationToken} from user input and prints the encoded auth.* properties.
 */
@AutoService(KeywordExecutable.class)
public class CreateToken implements KeywordExecutable {

  private Console reader = null;

  // lazily grab the process console; NOTE(review): System.console() can return null when not
  // attached to a terminal — callers would then NPE. Preserving existing behavior here.
  private Console getConsoleReader() {
    if (reader == null) {
      reader = System.console();
    }
    return reader;
  }

  static class Opts extends Help {
    @Parameter(names = {"-u", "--user"}, description = "Connection user")
    public String principal = null;

    @Parameter(names = "-p", converter = PasswordConverter.class,
        description = "Connection password")
    public String password = null;

    @Parameter(names = "--password", converter = PasswordConverter.class,
        description = "Enter the connection password", password = true)
    public String securePassword = null;

    @Parameter(names = {"-tc", "--tokenClass"},
        description = "The class of the authentication token")
    public String tokenClassName = PasswordToken.class.getName();
  }

  public static void main(String[] args) {
    new CreateToken().execute(args);
  }

  @Override
  public String keyword() {
    return "create-token";
  }

  @Override
  public String description() {
    return "Creates authentication token";
  }

  @Override
  public void execute(String[] args) {
    Opts opts = new Opts();
    opts.parseArgs("accumulo create-token", args);

    // prefer -p, fall back to the interactively-entered --password value
    String pass = opts.password;
    if (pass == null && opts.securePassword != null) {
      pass = opts.securePassword;
    }

    String principal = opts.principal;
    if (principal == null) {
      principal = getConsoleReader().readLine("Username (aka principal): ");
    }

    AuthenticationToken token;
    try {
      token = Class.forName(opts.tokenClassName).asSubclass(AuthenticationToken.class)
          .getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new IllegalStateException(e);
    }

    Properties props = new Properties();
    for (TokenProperty tp : token.getProperties()) {
      String input;
      if (pass != null && tp.getKey().equals("password")) {
        input = pass;
      } else if (tp.getMask()) {
        // BUG FIX: Console.readLine(fmt, args...) does NOT mask input — the '*' previously
        // passed here was silently consumed as an unused format argument and the secret was
        // echoed to the terminal. readPassword disables echo.
        input = new String(getConsoleReader().readPassword(tp.getDescription() + ": "));
      } else {
        input = getConsoleReader().readLine(tp.getDescription() + ": ");
      }
      props.put(tp.getKey(), input);
      token.init(props);
    }
    System.out.println("auth.type = " + opts.tokenClassName);
    System.out.println("auth.principal = " + principal);
    System.out.println("auth.token = " + ClientProperty.encodeToken(token));
  }
}
9,602
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/DurationFormat.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util; public class DurationFormat { private final String str; public DurationFormat(long time, String space) { String dash = "-"; long ms, sec, min, hr, day, yr; if (time == 0) { str = dash; return; } ms = time % 1000; time /= 1000; if (time == 0) { str = String.format("%dms", ms); return; } sec = time % 60; time /= 60; if (time == 0) { str = String.format("%ds" + space + "%dms", sec, ms); return; } min = time % 60; time /= 60; if (time == 0) { str = String.format("%dm" + space + "%ds", min, sec); return; } hr = time % 24; time /= 24; if (time == 0) { str = String.format("%dh" + space + "%dm", hr, min); return; } day = time % 365; time /= 365; if (time == 0) { str = String.format("%dd" + space + "%dh", day, hr); return; } yr = time; str = String.format("%dy" + space + "%dd", yr, day); } @Override public String toString() { return str; } }
9,603
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/tables/TableZooHelper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.tables;

import static com.google.common.base.Preconditions.checkArgument;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.concurrent.TimeUnit.MINUTES;
import static org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME;

import java.util.List;
import java.util.Objects;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.Namespaces;
import org.apache.accumulo.core.data.NamespaceId;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.manager.state.tables.TableState;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

/**
 * Helper for resolving table names, IDs, states, and namespaces from ZooKeeper, with a per-instance
 * cache of the table name/ID maps.
 */
public class TableZooHelper implements AutoCloseable {

  private final ClientContext context;
  // Per instance cache will expire after 10 minutes in case we
  // encounter an instance not used frequently
  private final Cache<TableZooHelper,TableMap> instanceToMapCache =
      Caffeine.newBuilder().expireAfterAccess(10, MINUTES).build();

  public TableZooHelper(ClientContext context) {
    this.context = Objects.requireNonNull(context);
  }

  /**
   * Lookup table ID in ZK.
   *
   * @throws TableNotFoundException if not found; if the namespace was not found, this has a
   *         getCause() of NamespaceNotFoundException
   */
  public TableId getTableId(String tableName) throws TableNotFoundException {
    try {
      return _getTableIdDetectNamespaceNotFound(EXISTING_TABLE_NAME.validate(tableName));
    } catch (NamespaceNotFoundException e) {
      throw new TableNotFoundException(tableName, e);
    }
  }

  /**
   * Lookup table ID in ZK. If not found, clears cache and tries again.
   */
  public TableId _getTableIdDetectNamespaceNotFound(String tableName)
      throws NamespaceNotFoundException, TableNotFoundException {
    TableId tableId = getTableMap().getNameToIdMap().get(tableName);
    if (tableId == null) {
      // maybe the table exist, but the cache was not updated yet...
      // so try to clear the cache and check again
      clearTableListCache();
      tableId = getTableMap().getNameToIdMap().get(tableName);
      if (tableId == null) {
        // distinguish "table missing" from "whole namespace missing" for a better error
        String namespace = TableNameUtil.qualify(tableName).getFirst();
        if (Namespaces.getNameToIdMap(context).containsKey(namespace)) {
          throw new TableNotFoundException(null, tableName, null);
        } else {
          throw new NamespaceNotFoundException(null, namespace, null);
        }
      }
    }
    return tableId;
  }

  /** Returns the fully qualified name for the given table ID. */
  public String getTableName(TableId tableId) throws TableNotFoundException {
    String tableName = getTableMap().getIdtoNameMap().get(tableId);
    if (tableName == null) {
      throw new TableNotFoundException(tableId.canonical(), null, null);
    }
    return tableName;
  }

  /**
   * Get the TableMap from the cache. A new one will be populated when needed. Cache is cleared
   * manually by calling {@link #clearTableListCache()}
   */
  public TableMap getTableMap() {
    final ZooCache zc = context.getZooCache();
    TableMap map = getCachedTableMap();
    if (!map.isCurrent(zc)) {
      // ZooCache has newer data; rebuild the map
      instanceToMapCache.invalidateAll();
      map = getCachedTableMap();
    }
    return map;
  }

  private TableMap getCachedTableMap() {
    return instanceToMapCache.get(this, k -> new TableMap(context));
  }

  /** Returns true if a ZooKeeper node exists for the given table ID. */
  public boolean tableNodeExists(TableId tableId) {
    ZooCache zc = context.getZooCache();
    List<String> tableIds = zc.getChildren(context.getZooKeeperRoot() + Constants.ZTABLES);
    return tableIds.contains(tableId.canonical());
  }

  /** Clears the cached table and namespace listings, in ZooCache and in this helper. */
  public void clearTableListCache() {
    context.getZooCache().clear(context.getZooKeeperRoot() + Constants.ZTABLES);
    context.getZooCache().clear(context.getZooKeeperRoot() + Constants.ZNAMESPACES);
    instanceToMapCache.invalidateAll();
  }

  public String getPrintableTableInfoFromId(TableId tableId) {
    try {
      return _printableTableInfo(getTableName(tableId), tableId);
    } catch (TableNotFoundException e) {
      // best-effort formatting; show "?" for the unresolvable name
      return _printableTableInfo(null, tableId);
    }
  }

  public String getPrintableTableInfoFromName(String tableName) {
    try {
      return _printableTableInfo(tableName, getTableId(tableName));
    } catch (TableNotFoundException e) {
      // best-effort formatting; show "?" for the unresolvable ID
      return _printableTableInfo(tableName, null);
    }
  }

  private String _printableTableInfo(String tableName, TableId tableId) {
    return String.format("%s(ID:%s)", tableName == null ? "?" : tableName,
        tableId == null ? "?" : tableId.canonical());
  }

  /**
   * Get the current state of the table using the tableid. The boolean clearCache, if true will
   * clear the table state in zookeeper before fetching the state. Added with ACCUMULO-4574.
   *
   * @param tableId the table id
   * @param clearCachedState if true clear the table state in zookeeper before checking status
   * @return the table state.
   */
  public TableState getTableState(TableId tableId, boolean clearCachedState) {
    String statePath = context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId.canonical()
        + Constants.ZTABLE_STATE;
    if (clearCachedState) {
      // BUG FIX: statePath already starts with the ZooKeeper root; prepending the root again
      // produced a nonexistent path, so the cached state was never actually cleared.
      context.getZooCache().clear(statePath);
      instanceToMapCache.invalidateAll();
    }
    ZooCache zc = context.getZooCache();
    byte[] state = zc.get(statePath);
    if (state == null) {
      return TableState.UNKNOWN;
    }
    return TableState.valueOf(new String(state, UTF_8));
  }

  /**
   * Returns the namespace id for a given table ID.
   *
   * @param tableId The tableId
   * @return The namespace id which this table resides in.
   * @throws IllegalArgumentException if the table doesn't exist in ZooKeeper
   */
  public NamespaceId getNamespaceId(TableId tableId) throws TableNotFoundException {
    checkArgument(context != null, "instance is null");
    checkArgument(tableId != null, "tableId is null");
    ZooCache zc = context.getZooCache();
    byte[] n = zc.get(context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId
        + Constants.ZTABLE_NAMESPACE);
    // We might get null out of ZooCache if this tableID doesn't exist
    if (n == null) {
      throw new TableNotFoundException(tableId.canonical(), null, null);
    }
    return NamespaceId.of(new String(n, UTF_8));
  }

  @Override
  public void close() {
    instanceToMapCache.invalidateAll();
  }
}
9,604
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/tables/TableNameUtil.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.tables; import static org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME; import org.apache.accumulo.core.clientImpl.Namespace; import org.apache.accumulo.core.util.Pair; public class TableNameUtil { // static utility only; don't allow instantiation private TableNameUtil() {} public static String qualified(String tableName) { return qualified(tableName, Namespace.DEFAULT.name()); } public static String qualified(String tableName, String defaultNamespace) { Pair<String,String> qualifiedTableName = qualify(tableName, defaultNamespace); if (Namespace.DEFAULT.name().equals(qualifiedTableName.getFirst())) { return qualifiedTableName.getSecond(); } else { return qualifiedTableName.toString("", ".", ""); } } public static Pair<String,String> qualify(String tableName) { return qualify(tableName, Namespace.DEFAULT.name()); } private static Pair<String,String> qualify(String tableName, String defaultNamespace) { EXISTING_TABLE_NAME.validate(tableName); if (tableName.contains(".")) { String[] s = tableName.split("\\.", 2); return new Pair<>(s[0], s[1]); } return new Pair<>(defaultNamespace, tableName); } }
9,605
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/tables/TableMap.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.tables;

import static java.nio.charset.StandardCharsets.UTF_8;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.Namespace;
import org.apache.accumulo.core.clientImpl.Namespaces;
import org.apache.accumulo.core.data.NamespaceId;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.ImmutableMap;

/**
 * Used for thread safe caching of immutable table ID maps. See ACCUMULO-4778.
 */
public class TableMap {
  private static final Logger log = LoggerFactory.getLogger(TableMap.class);

  // immutable name->ID and ID->name snapshots built once in the constructor
  private final Map<String,TableId> tableNameToIdMap;
  private final Map<TableId,String> tableIdToNameMap;

  // the ZooCache this snapshot was built from, and its update count at build time;
  // both are used by isCurrent() to detect staleness
  private final ZooCache zooCache;
  private final long updateCount;

  /**
   * Builds an immutable snapshot of the table name/ID mappings from ZooKeeper. Tables whose name
   * node is missing, or whose namespace cannot be resolved, are skipped rather than failing the
   * whole snapshot.
   */
  public TableMap(ClientContext context) {
    this.zooCache = context.getZooCache();
    // important to read this first
    // (reading the update count before the children means any concurrent ZooCache change
    // after this point makes isCurrent() report stale, never the reverse)
    this.updateCount = zooCache.getUpdateCount();
    List<String> tableIds = zooCache.getChildren(context.getZooKeeperRoot() + Constants.ZTABLES);
    // memoizes namespace ID -> name lookups across the loop to avoid repeated resolution
    Map<NamespaceId,String> namespaceIdToNameMap = new HashMap<>();
    final var tableNameToIdBuilder = ImmutableMap.<String,TableId>builder();
    final var tableIdToNameBuilder = ImmutableMap.<TableId,String>builder();
    // use StringBuilder to construct zPath string efficiently across many tables
    StringBuilder zPathBuilder = new StringBuilder();
    zPathBuilder.append(context.getZooKeeperRoot()).append(Constants.ZTABLES).append("/");
    int prefixLength = zPathBuilder.length();
    for (String tableIdStr : tableIds) {
      // reset StringBuilder to prefix length before appending ID and suffix
      zPathBuilder.setLength(prefixLength);
      zPathBuilder.append(tableIdStr).append(Constants.ZTABLE_NAME);
      byte[] tableName = zooCache.get(zPathBuilder.toString());
      zPathBuilder.setLength(prefixLength);
      zPathBuilder.append(tableIdStr).append(Constants.ZTABLE_NAMESPACE);
      byte[] nId = zooCache.get(zPathBuilder.toString());
      String namespaceName = Namespace.DEFAULT.name();
      // create fully qualified table name
      if (nId == null) {
        // no namespace node: treat as unresolvable so the table is skipped below
        namespaceName = null;
      } else {
        NamespaceId namespaceId = NamespaceId.of(new String(nId, UTF_8));
        if (!namespaceId.equals(Namespace.DEFAULT.id())) {
          try {
            namespaceName = namespaceIdToNameMap.get(namespaceId);
            if (namespaceName == null) {
              namespaceName = Namespaces.getNamespaceName(context, namespaceId);
              namespaceIdToNameMap.put(namespaceId, namespaceName);
            }
          } catch (NamespaceNotFoundException e) {
            // dangling namespace reference: log and skip this table rather than fail the snapshot
            log.error("Table (" + tableIdStr + ") contains reference to namespace (" + namespaceId
                + ") that doesn't exist", e);
            continue;
          }
        }
      }
      if (tableName != null && namespaceName != null) {
        String tableNameStr = TableNameUtil.qualified(new String(tableName, UTF_8), namespaceName);
        TableId tableId = TableId.of(tableIdStr);
        tableNameToIdBuilder.put(tableNameStr, tableId);
        tableIdToNameBuilder.put(tableId, tableNameStr);
      }
    }
    tableNameToIdMap = tableNameToIdBuilder.build();
    tableIdToNameMap = tableIdToNameBuilder.build();
  }

  /** Returns the immutable table name -> table ID map. */
  public Map<String,TableId> getNameToIdMap() {
    return tableNameToIdMap;
  }

  /** Returns the immutable table ID -> table name map. */
  public Map<TableId,String> getIdtoNameMap() {
    return tableIdToNameMap;
  }

  /**
   * Returns true when this snapshot was built from the given ZooCache instance and that cache has
   * not been updated since; a false result means callers should rebuild the map.
   */
  public boolean isCurrent(ZooCache zc) {
    return this.zooCache == zc && this.updateCount == zc.getUpdateCount();
  }
}
9,606
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionPlanImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;

import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
import org.apache.accumulo.core.spi.compaction.CompactionJob;
import org.apache.accumulo.core.spi.compaction.CompactionKind;
import org.apache.accumulo.core.spi.compaction.CompactionPlan;

import com.google.common.base.Preconditions;

/**
 * Immutable implementation of {@link CompactionPlan}: an unmodifiable list of compaction jobs,
 * created via {@link BuilderImpl}.
 */
public class CompactionPlanImpl implements CompactionPlan {

  private final Collection<CompactionJob> jobs;

  private CompactionPlanImpl(Collection<CompactionJob> jobs) {
    this.jobs = List.copyOf(jobs);
  }

  @Override
  public Collection<CompactionJob> getJobs() {
    return jobs;
  }

  @Override
  public String toString() {
    return "jobs: " + jobs;
  }

  /**
   * Builder that validates each added job's files are disjoint from earlier jobs and drawn from
   * the candidate set.
   */
  public static class BuilderImpl implements CompactionPlan.Builder {

    private final CompactionKind kind;
    private ArrayList<CompactionJob> jobs = new ArrayList<>();
    private final Set<CompactableFile> allFiles;
    // union of files claimed by jobs added so far, used for the overlap check
    private final Set<CompactableFile> seenFiles = new HashSet<>();
    private final Set<CompactableFile> candidates;

    public BuilderImpl(CompactionKind kind, Set<CompactableFile> allFiles,
        Set<CompactableFile> candidates) {
      this.kind = kind;
      this.allFiles = allFiles;
      this.candidates = candidates;
    }

    @Override
    public Builder addJob(short priority, CompactionExecutorId executor,
        Collection<CompactableFile> files) {
      // reuse the caller's set when possible, otherwise take an immutable snapshot
      Set<CompactableFile> jobFiles;
      if (files instanceof Set) {
        jobFiles = (Set<CompactableFile>) files;
      } else {
        jobFiles = Set.copyOf(files);
      }
      Preconditions.checkArgument(Collections.disjoint(jobFiles, seenFiles),
          "Job files overlaps with previous job %s %s", files, jobs);
      Preconditions.checkArgument(candidates.containsAll(jobFiles),
          "Job files are not compaction candidates %s %s", files, candidates);
      seenFiles.addAll(jobFiles);
      // flag whether this single job compacts every file in the tablet
      boolean compactingAll = jobFiles.equals(allFiles);
      jobs.add(new CompactionJobImpl(priority, executor, jobFiles, kind,
          Optional.of(compactingAll)));
      return this;
    }

    @Override
    public CompactionPlan build() {
      return new CompactionPlanImpl(jobs);
    }
  }
}
9,607
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobPrioritizer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.Comparator;

import org.apache.accumulo.core.spi.compaction.CompactionJob;
import org.apache.accumulo.core.spi.compaction.CompactionKind;

/**
 * Computes and compares compaction job priorities. USER compactions map to the non-negative half
 * of the short range; SELECTOR/SYSTEM compactions map to the negative half, so any USER job always
 * outranks any system-initiated job.
 */
public class CompactionJobPrioritizer {

  /** Orders jobs highest-priority first, breaking ties by larger file count. */
  public static final Comparator<CompactionJob> JOB_COMPARATOR =
      Comparator.comparingInt(CompactionJob::getPriority)
          .thenComparingInt(job -> job.getFiles().size()).reversed();

  // static utility only; prevent instantiation (fix: class previously had an implicit public ctor)
  private CompactionJobPrioritizer() {}

  /**
   * Maps a compaction kind and file counts onto the short priority range.
   *
   * @param kind the kind of compaction being prioritized
   * @param totalFiles total number of files in the tablet
   * @param compactingFiles number of files already compacting
   * @return the priority as a short, saturating at the top of the relevant half-range
   * @throws AssertionError for an unknown kind
   */
  public static short createPriority(CompactionKind kind, int totalFiles, int compactingFiles) {
    int prio = totalFiles + compactingFiles;
    switch (kind) {
      case USER:
        // user-initiated compactions will have a positive priority
        // based on number of files
        if (prio > Short.MAX_VALUE) {
          return Short.MAX_VALUE;
        }
        return (short) prio;
      case SELECTOR:
      case SYSTEM:
        // system-initiated compactions will have a negative priority
        // starting at -32768 and increasing based on number of files
        // maxing out at -1
        if (prio > Short.MAX_VALUE) {
          return -1;
        } else {
          return (short) (Short.MIN_VALUE + prio);
        }
      default:
        throw new AssertionError("Unknown kind " + kind);
    }
  }
}
9,608
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/RunningCompactionInfo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;

import java.util.TreeMap;

import org.apache.accumulo.core.compaction.thrift.TCompactionStatusUpdate;
import org.apache.accumulo.core.compaction.thrift.TExternalCompaction;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Snapshot of a running external compaction, derived from a Thrift {@link TExternalCompaction}.
 * All fields are computed once in the constructor relative to the wall clock at construction time.
 */
public class RunningCompactionInfo {
  private static final Logger log = LoggerFactory.getLogger(RunningCompactionInfo.class);

  // DO NOT CHANGE Variable names - they map to JSON keys in the Monitor
  public final String server;
  public final String queueName;
  public final String ecid;
  public final String kind;
  public final String tableId;
  public final int numFiles;
  public final float progress;
  public final long duration;
  public final String status;
  public final long lastUpdate;

  /**
   * Info parsed about the external running compaction. Calculate the progress, which is defined as
   * the percentage of bytesRead / bytesToBeCompacted of the last update.
   */
  public RunningCompactionInfo(TExternalCompaction ec) {
    requireNonNull(ec, "Thrift external compaction is null.");
    var updates = requireNonNull(ec.getUpdates(), "Missing Thrift external compaction updates");
    var job = requireNonNull(ec.getJob(), "Thrift external compaction job is null");
    server = ec.getCompactor();
    queueName = ec.getQueueName();
    ecid = job.getExternalCompactionId();
    kind = job.getKind().name();
    tableId = KeyExtent.fromThrift(job.getExtent()).tableId().canonical();
    numFiles = job.getFiles().size();
    // parse the updates map
    long nowMillis = System.currentTimeMillis();
    // assume the compaction started "now" until the first update proves otherwise
    long startedMillis = nowMillis;
    float percent = 0f;
    long updateMillis;
    TCompactionStatusUpdate last;
    // sort updates by key, which is a timestamp
    TreeMap<Long,TCompactionStatusUpdate> sorted = new TreeMap<>(updates);
    var firstEntry = sorted.firstEntry();
    var lastEntry = sorted.lastEntry();
    if (firstEntry != null) {
      startedMillis = firstEntry.getKey();
    }
    duration = nowMillis - startedMillis;
    long durationMinutes = MILLISECONDS.toMinutes(duration);
    if (durationMinutes > 15) {
      // long-running compactions are worth surfacing in the logs
      log.warn("Compaction {} has been running for {} minutes", ecid, durationMinutes);
    }
    // last entry is all we care about so bail if null
    if (lastEntry != null) {
      last = lastEntry.getValue();
      updateMillis = lastEntry.getKey();
    } else {
      // no updates yet: report placeholder values (lastUpdate=1, progress=0, status "na")
      log.debug("No updates found for {}", ecid);
      lastUpdate = 1;
      progress = percent;
      status = "na";
      return;
    }
    long sinceLastUpdateSeconds = MILLISECONDS.toSeconds(nowMillis - updateMillis);
    log.debug("Time since Last update {} - {} = {} seconds", nowMillis, updateMillis,
        sinceLastUpdateSeconds);
    // progress = entriesRead / entriesToBeCompacted of the most recent update, as a percentage
    var total = last.getEntriesToBeCompacted();
    if (total > 0) {
      percent = (last.getEntriesRead() / (float) total) * 100;
    }
    lastUpdate = nowMillis - updateMillis;
    progress = percent;
    if (updates.isEmpty()) {
      status = "na";
    } else {
      status = last.state.name();
    }
    log.debug("Parsed running compaction {} for {} with progress = {}%", status, ecid, progress);
    if (sinceLastUpdateSeconds > 30) {
      // stale update stream; progress may be frozen rather than the compaction being stuck
      log.debug("Compaction hasn't progressed from {} in {} seconds.", progress,
          sinceLastUpdateSeconds);
    }
  }

  /** Returns a short human-readable summary: "ecid: status progress: N". */
  @Override
  public String toString() {
    return ecid + ": " + status + " progress: " + progress;
  }
}
9,609
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionPlannerInitParams.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
import org.apache.accumulo.core.spi.compaction.CompactionPlanner;
import org.apache.accumulo.core.spi.compaction.CompactionServiceId;
import org.apache.accumulo.core.spi.compaction.ExecutorManager;

import com.google.common.base.Preconditions;

/**
 * Initialization parameters handed to a {@link CompactionPlanner}. Records the executors a planner
 * requests (internal thread pools and external queues) so the compaction service can create them.
 */
public class CompactionPlannerInitParams implements CompactionPlanner.InitParameters {

  private final CompactionServiceId serviceId;
  private final ServiceEnvironment senv;
  private final Map<String,String> plannerOpts;
  private final Map<CompactionExecutorId,Integer> requestedExecutors;
  private final Set<CompactionExecutorId> requestedExternalExecutors;

  public CompactionPlannerInitParams(CompactionServiceId serviceId, Map<String,String> plannerOpts,
      ServiceEnvironment senv) {
    this.serviceId = serviceId;
    this.plannerOpts = plannerOpts;
    this.senv = senv;
    this.requestedExecutors = new HashMap<>();
    this.requestedExternalExecutors = new HashSet<>();
  }

  @Override
  public ServiceEnvironment getServiceEnvironment() {
    return senv;
  }

  @Override
  public Map<String,String> getOptions() {
    return plannerOpts;
  }

  @Override
  public String getFullyQualifiedOption(String key) {
    // expand a short option name into the full tserver compaction service property key
    return Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey() + serviceId + ".opts." + key;
  }

  @Override
  public ExecutorManager getExecutorManager() {
    return new ExecutorManager() {
      @Override
      public CompactionExecutorId createExecutor(String executorName, int threads) {
        Preconditions.checkArgument(threads > 0, "Positive number of threads required : %s",
            threads);
        var executorId = CompactionExecutorIdImpl.internalId(serviceId, executorName);
        Preconditions.checkState(!requestedExecutors.containsKey(executorId),
            "Duplicate Compaction Executor ID found");
        requestedExecutors.put(executorId, threads);
        return executorId;
      }

      @Override
      public CompactionExecutorId getExternalExecutor(String name) {
        var executorId = CompactionExecutorIdImpl.externalId(name);
        Preconditions.checkArgument(!requestedExternalExecutors.contains(executorId),
            "Duplicate external executor for queue " + name);
        requestedExternalExecutors.add(executorId);
        return executorId;
      }
    };
  }

  /** @return internal executors requested so far, mapped to their thread counts */
  public Map<CompactionExecutorId,Integer> getRequestedExecutors() {
    return requestedExecutors;
  }

  /** @return external executor queues requested so far */
  public Set<CompactionExecutorId> getRequestedExternalExecutors() {
    return requestedExternalExecutors;
  }
}
9,610
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionJobImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.Collection;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;

import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
import org.apache.accumulo.core.spi.compaction.CompactionJob;
import org.apache.accumulo.core.spi.compaction.CompactionKind;

/**
 * An immutable object that describes what files to compact and where to compact them.
 *
 * @since 2.1.0
 * @see org.apache.accumulo.core.spi.compaction
 */
public class CompactionJobImpl implements CompactionJob {

  private final short priority;
  private final CompactionExecutorId executor;
  private final Set<CompactableFile> files;
  private final CompactionKind kind;
  // Tracks if a job selected all of the tablets files that existed at the time the job was created.
  private final Optional<Boolean> jobSelectedAll;

  /**
   *
   * @param jobSelectedAll This parameters only needs to be non-empty for job objects that are used
   *        to start compaction. After a job is running, its not used. So when a job object is
   *        recreated for a running external compaction this parameter can be empty.
   */
  public CompactionJobImpl(short priority, CompactionExecutorId executor,
      Collection<CompactableFile> files, CompactionKind kind, Optional<Boolean> jobSelectedAll) {
    this.priority = priority;
    this.executor = Objects.requireNonNull(executor);
    this.files = Set.copyOf(files);
    this.kind = Objects.requireNonNull(kind);
    this.jobSelectedAll = Objects.requireNonNull(jobSelectedAll);
  }

  @Override
  public short getPriority() {
    return priority;
  }

  /**
   * @return The executor to run the job.
   */
  @Override
  public CompactionExecutorId getExecutor() {
    return executor;
  }

  /**
   * @return The files to compact
   */
  @Override
  public Set<CompactableFile> getFiles() {
    return files;
  }

  /**
   * @return The kind of compaction this is.
   */
  @Override
  public CompactionKind getKind() {
    return kind;
  }

  /**
   * @return whether this job selected all of the tablet's files; throws if the value was not
   *         supplied at construction time
   */
  public boolean selectedAll() {
    return jobSelectedAll.orElseThrow();
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof CompactionJobImpl)) {
      return false;
    }
    // jobSelectedAll is intentionally excluded here, matching hashCode below
    CompactionJobImpl other = (CompactionJobImpl) o;
    return priority == other.priority && executor.equals(other.executor)
        && files.equals(other.files) && kind == other.kind;
  }

  @Override
  public int hashCode() {
    return Objects.hash(priority, executor, files, kind);
  }

  @Override
  public String toString() {
    return "CompactionJob [priority=" + priority + ", executor=" + executor + ", files=" + files
        + ", kind=" + kind + "]";
  }
}
9,611
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/ExternalCompactionUtil.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.compaction.thrift.CompactorService;
import org.apache.accumulo.core.fate.zookeeper.ZooCache.ZcStat;
import org.apache.accumulo.core.fate.zookeeper.ZooReader;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.lock.ServiceLockData.ThriftService;
import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
import org.apache.accumulo.core.rpc.ThriftUtil;
import org.apache.accumulo.core.rpc.clients.ThriftClientTypes;
import org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction;
import org.apache.accumulo.core.tabletserver.thrift.TExternalCompactionJob;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.thrift.TException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.net.HostAndPort;

/**
 * Utilities for discovering external compactors via ZooKeeper and querying them over Thrift for
 * the compactions they are currently running.
 */
public class ExternalCompactionUtil {

  /**
   * Pairs a queue name and compactor address with the pending Thrift call that fetches the
   * compaction currently running on that compactor.
   */
  private static class RunningCompactionFuture {
    private final String queue;
    private final HostAndPort compactor;
    private final Future<TExternalCompactionJob> future;

    public RunningCompactionFuture(String queue, HostAndPort compactor,
        Future<TExternalCompactionJob> future) {
      this.queue = queue;
      this.compactor = compactor;
      this.future = future;
    }

    public String getQueue() {
      return queue;
    }

    public HostAndPort getCompactor() {
      return compactor;
    }

    public Future<TExternalCompactionJob> getFuture() {
      return future;
    }
  }

  private static final Logger LOG = LoggerFactory.getLogger(ExternalCompactionUtil.class);

  /**
   * Utility for returning the address of a service in the form host:port
   *
   * @param address HostAndPort of service
   * @return host and port, or null when address is null
   */
  public static String getHostPortString(HostAndPort address) {
    if (address == null) {
      return null;
    }
    return address.toString();
  }

  /**
   * Looks up the CompactionCoordinator's lock in ZooKeeper to find its advertised address.
   *
   * @return Optional HostAndPort of Coordinator node if found
   */
  public static Optional<HostAndPort> findCompactionCoordinator(ClientContext context) {
    final String lockPath = context.getZooKeeperRoot() + Constants.ZCOORDINATOR_LOCK;
    return ServiceLock.getLockData(context.getZooCache(), ServiceLock.path(lockPath), new ZcStat())
        .map(sld -> sld.getAddress(ThriftService.COORDINATOR));
  }

  /**
   * Reads the compactor queue nodes from ZooKeeper; a compactor address is considered live only
   * when it has a child node (the compactor's lock).
   *
   * @return map of queue names to compactor addresses
   */
  public static Map<String,List<HostAndPort>> getCompactorAddrs(ClientContext context) {
    try {
      final Map<String,List<HostAndPort>> queuesAndAddresses = new HashMap<>();
      final String compactorQueuesPath = context.getZooKeeperRoot() + Constants.ZCOMPACTORS;
      ZooReader zooReader = context.getZooReader();
      List<String> queues = zooReader.getChildren(compactorQueuesPath);
      for (String queue : queues) {
        queuesAndAddresses.putIfAbsent(queue, new ArrayList<>());
        try {
          List<String> compactors = zooReader.getChildren(compactorQueuesPath + "/" + queue);
          for (String compactor : compactors) {
            // compactor is the address, we are checking to see if there is a child node which
            // represents the compactor's lock as a check that it's alive.
            List<String> children =
                zooReader.getChildren(compactorQueuesPath + "/" + queue + "/" + compactor);
            if (!children.isEmpty()) {
              LOG.trace("Found live compactor {} ", compactor);
              queuesAndAddresses.get(queue).add(HostAndPort.fromString(compactor));
            }
          }
        } catch (NoNodeException e) {
          // a queue node may be deleted between listing queues and listing its children
          LOG.trace("Ignoring node that went missing", e);
        }
      }

      return queuesAndAddresses;
    } catch (KeeperException e) {
      throw new IllegalStateException(e);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IllegalStateException(e);
    }
  }

  /**
   * Fetches the active compactions from one compactor; communication failures other than security
   * errors are logged and swallowed, returning an empty list.
   *
   * @param compactor compactor address
   * @param context client context
   * @return list of active compaction
   * @throws ThriftSecurityException tserver permission error
   */
  public static List<ActiveCompaction> getActiveCompaction(HostAndPort compactor,
      ClientContext context) throws ThriftSecurityException {
    CompactorService.Client client = null;
    try {
      client = ThriftUtil.getClient(ThriftClientTypes.COMPACTOR, compactor, context);
      return client.getActiveCompactions(TraceUtil.traceInfo(), context.rpcCreds());
    } catch (ThriftSecurityException e) {
      throw e;
    } catch (TException e) {
      LOG.debug("Failed to contact compactor {}", compactor, e);
    } finally {
      ThriftUtil.returnClient(client, context);
    }
    return List.of();
  }

  /**
   * Get the compaction currently running on the Compactor
   *
   * @param compactorAddr compactor address
   * @param context context
   * @return external compaction job or null if none running
   */
  public static TExternalCompactionJob getRunningCompaction(HostAndPort compactorAddr,
      ClientContext context) {
    CompactorService.Client client = null;
    try {
      client = ThriftUtil.getClient(ThriftClientTypes.COMPACTOR, compactorAddr, context);
      TExternalCompactionJob job =
          client.getRunningCompaction(TraceUtil.traceInfo(), context.rpcCreds());
      // a job with no id indicates the compactor is idle
      if (job.getExternalCompactionId() != null) {
        LOG.debug("Compactor {} is running {}", compactorAddr, job.getExternalCompactionId());
        return job;
      }
    } catch (TException e) {
      LOG.debug("Failed to contact compactor {}", compactorAddr, e);
    } finally {
      ThriftUtil.returnClient(client, context);
    }
    return null;
  }

  /**
   * Asks one compactor for the id of its currently running compaction.
   *
   * @return the external compaction id, or null when the compactor is idle or unreachable
   */
  private static ExternalCompactionId getRunningCompactionId(HostAndPort compactorAddr,
      ClientContext context) {
    CompactorService.Client client = null;
    try {
      client = ThriftUtil.getClient(ThriftClientTypes.COMPACTOR, compactorAddr, context);
      String secid = client.getRunningCompactionId(TraceUtil.traceInfo(), context.rpcCreds());
      if (!secid.isEmpty()) {
        return ExternalCompactionId.of(secid);
      }
    } catch (TException e) {
      LOG.debug("Failed to contact compactor {}", compactorAddr, e);
    } finally {
      ThriftUtil.returnClient(client, context);
    }
    return null;
  }

  /**
   * This method returns information from the Compactor about the job that is currently running. The
   * RunningCompactions are not fully populated. This method is used from the CompactionCoordinator
   * on a restart to re-populate the set of running compactions on the compactors.
   *
   * @param context server context
   * @return list of compactor and external compaction jobs
   */
  public static List<RunningCompaction> getCompactionsRunningOnCompactors(ClientContext context) {
    final List<RunningCompactionFuture> rcFutures = new ArrayList<>();
    // query all compactors concurrently with a bounded pool
    final ExecutorService executor = ThreadPools.getServerThreadPools().createFixedThreadPool(16,
        "CompactorRunningCompactions", false);
    getCompactorAddrs(context).forEach((q, hp) -> {
      hp.forEach(hostAndPort -> {
        rcFutures.add(new RunningCompactionFuture(q, hostAndPort,
            executor.submit(() -> getRunningCompaction(hostAndPort, context))));
      });
    });
    // shutdown stops new submissions; already-submitted tasks still run and their futures resolve
    executor.shutdown();

    final List<RunningCompaction> results = new ArrayList<>();
    rcFutures.forEach(rcf -> {
      try {
        TExternalCompactionJob job = rcf.getFuture().get();
        if (null != job && null != job.getExternalCompactionId()) {
          var compactorAddress = getHostPortString(rcf.getCompactor());
          results.add(new RunningCompaction(job, compactorAddress, rcf.getQueue()));
        }
      } catch (InterruptedException | ExecutionException e) {
        throw new IllegalStateException(e);
      }
    });
    return results;
  }

  /**
   * Collects the ids of compactions currently running on all live compactors, querying them
   * concurrently. Idle or unreachable compactors contribute nothing.
   */
  public static Collection<ExternalCompactionId>
      getCompactionIdsRunningOnCompactors(ClientContext context) {
    final ExecutorService executor = ThreadPools.getServerThreadPools().createFixedThreadPool(16,
        "CompactorRunningCompactions", false);

    List<Future<ExternalCompactionId>> futures = new ArrayList<>();

    getCompactorAddrs(context).forEach((q, hp) -> {
      hp.forEach(hostAndPort -> {
        futures.add(executor.submit(() -> getRunningCompactionId(hostAndPort, context)));
      });
    });
    executor.shutdown();

    HashSet<ExternalCompactionId> runningIds = new HashSet<>();

    futures.forEach(future -> {
      try {
        ExternalCompactionId ceid = future.get();
        if (ceid != null) {
          runningIds.add(ceid);
        }
      } catch (InterruptedException | ExecutionException e) {
        throw new IllegalStateException(e);
      }
    });

    return runningIds;
  }

  /**
   * Counts compactors registered in ZooKeeper for a queue; only compactors holding a lock (a
   * child node under the address) are counted as live.
   */
  public static int countCompactors(String queueName, ClientContext context) {
    String queueRoot = context.getZooKeeperRoot() + Constants.ZCOMPACTORS + "/" + queueName;
    List<String> children = context.getZooCache().getChildren(queueRoot);
    if (children == null) {
      return 0;
    }
    int count = 0;
    for (String child : children) {
      List<String> children2 = context.getZooCache().getChildren(queueRoot + "/" + child);
      if (children2 != null && !children2.isEmpty()) {
        count++;
      }
    }
    return count;
  }

  /**
   * Asks a compactor to cancel the given external compaction; failures are logged, not thrown
   * (best-effort cancellation).
   */
  public static void cancelCompaction(ClientContext context, HostAndPort compactorAddr,
      String ecid) {
    CompactorService.Client client = null;
    try {
      client = ThriftUtil.getClient(ThriftClientTypes.COMPACTOR, compactorAddr, context);
      client.cancel(TraceUtil.traceInfo(), context.rpcCreds(), ecid);
    } catch (TException e) {
      LOG.debug("Failed to cancel compactor {} for {}", compactorAddr, ecid, e);
    } finally {
      ThriftUtil.returnClient(client, context);
    }
  }
}
9,612
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/RunningCompaction.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.Map;
import java.util.TreeMap;

import org.apache.accumulo.core.compaction.thrift.TCompactionStatusUpdate;
import org.apache.accumulo.core.compaction.thrift.TExternalCompaction;
import org.apache.accumulo.core.tabletserver.thrift.TExternalCompactionJob;

/**
 * In-memory record of an external compaction executing on a compactor, accumulating the status
 * updates reported for it over time.
 */
public class RunningCompaction {

  // status updates keyed by the timestamp at which each was recorded, kept sorted
  private final Map<Long,TCompactionStatusUpdate> statusUpdates = new TreeMap<>();
  private final TExternalCompactionJob job;
  private final String compactorAddress;
  private final String queueName;

  public RunningCompaction(TExternalCompactionJob job, String compactorAddress, String queueName) {
    this.job = job;
    this.compactorAddress = compactorAddress;
    this.queueName = queueName;
  }

  public RunningCompaction(TExternalCompaction tEC) {
    this(tEC.getJob(), tEC.getCompactor(), tEC.getQueueName());
  }

  public TExternalCompactionJob getJob() {
    return job;
  }

  public String getCompactorAddress() {
    return compactorAddress;
  }

  public String getQueueName() {
    return queueName;
  }

  public Map<Long,TCompactionStatusUpdate> getUpdates() {
    return statusUpdates;
  }

  public void addUpdate(Long timestamp, TCompactionStatusUpdate update) {
    statusUpdates.put(timestamp, update);
  }
}
9,613
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionExecutorIdImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.compaction; import org.apache.accumulo.core.spi.compaction.CompactionExecutorId; import org.apache.accumulo.core.spi.compaction.CompactionServiceId; import com.google.common.base.Preconditions; public class CompactionExecutorIdImpl extends CompactionExecutorId { protected CompactionExecutorIdImpl(String canonical) { super(canonical); } private static final long serialVersionUID = 1L; public boolean isExternalId() { return canonical().startsWith("e."); } public String getExternalName() { Preconditions.checkState(isExternalId()); return canonical().substring("e.".length()); } public static CompactionExecutorId internalId(CompactionServiceId csid, String executorName) { return new CompactionExecutorIdImpl("i." + csid + "." + executorName); } public static CompactionExecutorId externalId(String executorName) { return new CompactionExecutorIdImpl("e." + executorName); } }
9,614
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/compaction/CompactionServicesConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.compaction;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.spi.compaction.CompactionServiceId;

import com.google.common.collect.Sets;

/**
 * This class serves to configure compaction services from an {@link AccumuloConfiguration} object.
 *
 * Specifically, compaction service properties (those prefixed by "tserver.compaction.major
 * .service") are used.
 */
public class CompactionServicesConfig {

  // service name -> planner class name, from "<service>.planner" properties
  private final Map<String,String> planners = new HashMap<>();
  // service name -> rate limit in bytes, from "<service>.rate.limit" properties
  private final Map<String,Long> rateLimits = new HashMap<>();
  // service name -> planner option map, from "<service>.planner.opts.<opt>" properties
  private final Map<String,Map<String,String>> options = new HashMap<>();
  long defaultRateLimit;
  public static final CompactionServiceId DEFAULT_SERVICE = CompactionServiceId.of("default");

  @SuppressWarnings("removal")
  private long getDefaultThroughput() {
    return ConfigurationTypeHelper
        .getMemoryAsBytes(Property.TSERV_COMPACTION_SERVICE_DEFAULT_RATE_LIMIT.getDefaultValue());
  }

  private Map<String,String> getConfiguration(AccumuloConfiguration aconf) {
    return aconf.getAllPropertiesWithPrefix(Property.TSERV_COMPACTION_SERVICE_PREFIX);
  }

  /**
   * Parses all compaction service properties from the given configuration, splitting each property
   * suffix into tokens to classify it as a planner class, a planner option, or a rate limit.
   *
   * @throws IllegalArgumentException on a malformed property name, or when a service has options
   *         but no planner class
   */
  public CompactionServicesConfig(AccumuloConfiguration aconf) {
    Map<String,String> configs = getConfiguration(aconf);

    configs.forEach((prop, val) -> {

      var suffix = prop.substring(Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey().length());
      String[] tokens = suffix.split("\\.");
      if (tokens.length == 4 && tokens[1].equals("planner") && tokens[2].equals("opts")) {
        // "<service>.planner.opts.<optname>"
        options.computeIfAbsent(tokens[0], k -> new HashMap<>()).put(tokens[3], val);
      } else if (tokens.length == 2 && tokens[1].equals("planner")) {
        // "<service>.planner"
        planners.put(tokens[0], val);
      } else if (tokens.length == 3 && tokens[1].equals("rate") && tokens[2].equals("limit")) {
        // "<service>.rate.limit" -- only honored for unknown properties or ones explicitly set,
        // so a (deprecated) default value does not override defaultRateLimit
        var eprop = Property.getPropertyByKey(prop);
        if (eprop == null || aconf.isPropertySet(eprop)) {
          rateLimits.put(tokens[0], ConfigurationTypeHelper.getFixedMemoryAsBytes(val));
        }
      } else {
        throw new IllegalArgumentException("Malformed compaction service property " + prop);
      }
    });

    defaultRateLimit = getDefaultThroughput();

    // every service that has planner options must also declare a planner class
    var diff = Sets.difference(options.keySet(), planners.keySet());
    if (!diff.isEmpty()) {
      throw new IllegalArgumentException(
          "Incomplete compaction service definitions, missing planner class " + diff);
    }

  }

  /** @return the configured rate limit for the service, or the default when none was set */
  public long getRateLimit(String serviceName) {
    return getRateLimits().getOrDefault(serviceName, defaultRateLimit);
  }

  @Override
  public boolean equals(Object o) {
    if (o instanceof CompactionServicesConfig) {
      var oc = (CompactionServicesConfig) o;
      return getPlanners().equals(oc.getPlanners()) && getOptions().equals(oc.getOptions())
          && getRateLimits().equals(oc.getRateLimits());
    }
    return false;
  }

  @Override
  public int hashCode() {
    return Objects.hash(getPlanners(), getOptions(), getRateLimits());
  }

  public Map<String,String> getPlanners() {
    return planners;
  }

  public Map<String,Long> getRateLimits() {
    return rateLimits;
  }

  public Map<String,Map<String,String>> getOptions() {
    return options;
  }
}
9,615
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ratelimit/SharedRateLimiterFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.ratelimit;

import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.accumulo.core.util.threads.Threads;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Provides the ability to retrieve a {@link RateLimiter} keyed to a specific string, which will
 * dynamically update its rate according to a specified callback function.
 */
public class SharedRateLimiterFactory {
  // period (ms) between debug-log throughput reports
  private static final long REPORT_RATE = 60000;
  // period (ms) between polling each limiter's RateProvider for a new rate
  private static final long UPDATE_RATE = 1000;
  private static SharedRateLimiterFactory instance = null;
  private static ScheduledFuture<?> updateTaskFuture;
  private final Logger log = LoggerFactory.getLogger(SharedRateLimiterFactory.class);
  // weak values so limiters can be garbage collected once no caller holds them
  private final WeakHashMap<String,WeakReference<SharedRateLimiter>> activeLimiters =
      new WeakHashMap<>();

  private SharedRateLimiterFactory() {}

  /** Get the singleton instance of the SharedRateLimiterFactory. */
  public static synchronized SharedRateLimiterFactory getInstance(AccumuloConfiguration conf) {
    if (instance == null) {
      instance = new SharedRateLimiterFactory();
      ScheduledThreadPoolExecutor svc =
          ThreadPools.getServerThreadPools().createGeneralScheduledExecutorService(conf);

      // the update task's future is kept in a field and its health is checked in create();
      // only the report task is handed to the non-critical task watcher
      updateTaskFuture = svc.scheduleWithFixedDelay(Threads
          .createNamedRunnable("SharedRateLimiterFactory update polling", instance::updateAll),
          UPDATE_RATE, UPDATE_RATE, MILLISECONDS);

      ScheduledFuture<?> future = svc.scheduleWithFixedDelay(Threads
          .createNamedRunnable("SharedRateLimiterFactory report polling", instance::reportAll),
          REPORT_RATE, REPORT_RATE, MILLISECONDS);
      ThreadPools.watchNonCriticalScheduledTask(future);
    }
    return instance;
  }

  /**
   * A callback which provides the current rate for a {@link RateLimiter}.
   */
  public interface RateProvider {
    /**
     * Calculate the current rate for the {@link RateLimiter}.
     *
     * @return Count of permits which should be provided per second. A non-positive count is taken
     *         to indicate that no rate limiting should be performed.
     */
    long getDesiredRate();
  }

  /**
   * Lookup the RateLimiter associated with the specified name, or create a new one for that name.
   *
   * @param name key for the rate limiter
   * @param rateProvider a function which can be called to get what the current rate for the rate
   *        limiter should be.
   */
  public RateLimiter create(String name, RateProvider rateProvider) {
    synchronized (activeLimiters) {
      if (updateTaskFuture.isDone()) {
        log.warn("SharedRateLimiterFactory update task has failed.");
      }
      var limiterRef = activeLimiters.get(name);
      var limiter = limiterRef == null ? null : limiterRef.get();
      if (limiter == null) {
        // either no limiter for this name yet, or the old one was garbage collected
        limiter = new SharedRateLimiter(name, rateProvider, rateProvider.getDesiredRate());
        activeLimiters.put(name, new WeakReference<>(limiter));
      }
      return limiter;
    }
  }

  // Snapshots the live limiters while holding the lock, then applies the action outside it so
  // alien code (update/report) never runs under the activeLimiters lock.
  private void copyAndThen(String actionName, Consumer<SharedRateLimiter> action) {
    Map<String,SharedRateLimiter> limitersCopy = new HashMap<>();

    // synchronize only for copy
    synchronized (activeLimiters) {
      activeLimiters.forEach((name, limiterRef) -> {
        var limiter = limiterRef.get();
        if (limiter != null) {
          limitersCopy.put(name, limiter);
        }
      });
    }

    limitersCopy.forEach((name, limiter) -> {
      try {
        action.accept(limiter);
      } catch (RuntimeException e) {
        log.error("Failed to {} limiter {}", actionName, name, e);
      }
    });
  }

  /**
   * Walk through all of the currently active RateLimiters, having each update its current rate.
   * This is called periodically so that we can dynamically update as configuration changes.
   */
  private void updateAll() {
    copyAndThen("update", SharedRateLimiter::update);
  }

  /**
   * Walk through all of the currently active RateLimiters, having each report its activity to the
   * debug log.
   */
  private void reportAll() {
    copyAndThen("report", SharedRateLimiter::report);
  }

  /** A Guava-backed limiter that can re-poll its rate and report throughput periodically. */
  protected class SharedRateLimiter extends GuavaRateLimiter {
    // permits acquired since the last report; reset on each report
    private AtomicLong permitsAcquired = new AtomicLong();
    // nanoTime of the last report, used to compute throughput over the interval
    private AtomicLong lastUpdate = new AtomicLong();

    private final RateProvider rateProvider;
    private final String name;

    SharedRateLimiter(String name, RateProvider rateProvider, long initialRate) {
      super(initialRate);
      this.name = name;
      this.rateProvider = rateProvider;
      this.lastUpdate.set(System.nanoTime());
    }

    @Override
    public void acquire(long numPermits) {
      super.acquire(numPermits);
      permitsAcquired.addAndGet(numPermits);
    }

    /** Poll the callback, updating the current rate if necessary. */
    public void update() {
      // Reset rate if needed
      long rate = rateProvider.getDesiredRate();
      if (rate != getRate()) {
        setRate(rate);
      }
    }

    /** Report the current throughput and usage of this rate limiter to the debug log. */
    public void report() {
      if (log.isDebugEnabled()) {
        long duration = NANOSECONDS.toMillis(System.nanoTime() - lastUpdate.get());
        if (duration == 0) {
          return;
        }
        lastUpdate.set(System.nanoTime());

        long sum = permitsAcquired.get();
        permitsAcquired.set(0);

        if (sum > 0) {
          log.debug(String.format("RateLimiter '%s': %,d of %,d permits/second", name,
              sum * 1000L / duration, getRate()));
        }
      }
    }
  }
}
9,616
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ratelimit/GuavaRateLimiter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.ratelimit; /** Rate limiter from the Guava library. */ public class GuavaRateLimiter implements RateLimiter { private final com.google.common.util.concurrent.RateLimiter rateLimiter; private long currentRate; /** * Constructor * * @param initialRate Count of permits which should be made available per second. A non-positive * rate is taken to indicate there should be no limitation on rate. */ public GuavaRateLimiter(long initialRate) { this.currentRate = initialRate; this.rateLimiter = com.google.common.util.concurrent.RateLimiter .create(initialRate > 0 ? initialRate : Long.MAX_VALUE); } @Override public long getRate() { return currentRate; } /** * Change the rate at which permits are made available. * * @param newRate Count of permits which should be made available per second. A non-positive rate * is taken to indicate that there should be no limitation on rate. */ public void setRate(long newRate) { this.rateLimiter.setRate(newRate > 0 ? 
newRate : Long.MAX_VALUE); this.currentRate = newRate; } @Override public void acquire(long numPermits) { if (this.currentRate > 0) { while (numPermits > Integer.MAX_VALUE) { rateLimiter.acquire(Integer.MAX_VALUE); numPermits -= Integer.MAX_VALUE; } if (numPermits > 0) { rateLimiter.acquire((int) numPermits); } } } }
9,617
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ratelimit/RateLimiter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.ratelimit; public interface RateLimiter { /** * Get current QPS of the rate limiter, with a non-positive rate indicating no limit. */ long getRate(); /** Sleep until the specified number of queries are available. */ void acquire(long numPermits); }
9,618
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ratelimit/NullRateLimiter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.ratelimit; /** * A rate limiter which doesn't actually limit rates at all. */ public class NullRateLimiter implements RateLimiter { public static final NullRateLimiter INSTANCE = new NullRateLimiter(); private NullRateLimiter() {} @Override public long getRate() { return 0; } @Override public void acquire(long numPermits) {} }
9,619
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/json/ByteArrayToBase64TypeAdapter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.json; import java.lang.reflect.Type; import java.util.Base64; import java.util.Base64.Decoder; import java.util.Base64.Encoder; import java.util.Objects; import com.google.gson.Gson; import com.google.gson.GsonBuilder; import com.google.gson.JsonDeserializationContext; import com.google.gson.JsonDeserializer; import com.google.gson.JsonElement; import com.google.gson.JsonParseException; import com.google.gson.JsonPrimitive; import com.google.gson.JsonSerializationContext; import com.google.gson.JsonSerializer; /** * Gson adapter to handle serializing and deserializing byte arrays using Base64 encoding. 
*/ public class ByteArrayToBase64TypeAdapter implements JsonSerializer<byte[]>, JsonDeserializer<byte[]> { private static final Decoder decoder = Base64.getUrlDecoder(); private static final Encoder encoder = Base64.getUrlEncoder(); @Override public byte[] deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException { return decoder.decode(json.getAsString()); } @Override public JsonElement serialize(byte[] src, Type typeOfSrc, JsonSerializationContext context) { return new JsonPrimitive(encoder.encodeToString(src)); } /** * Creates a new Gson instance that registers {@link ByteArrayToBase64TypeAdapter} for handling * serializing/deserializing byte[] types as Base64 encoded * * @return Gson instance */ public static Gson createBase64Gson() { return registerBase64TypeAdapter(new GsonBuilder()).create(); } /** * Register {@link ByteArrayToBase64TypeAdapter} for handling byte[] types on an existing * GsonBuilder * * @param gsonBuilder existing GsonBuilder * @return GsonBuilder */ public static GsonBuilder registerBase64TypeAdapter(final GsonBuilder gsonBuilder) { return Objects.requireNonNull(gsonBuilder).disableHtmlEscaping() .registerTypeHierarchyAdapter(byte[].class, new ByteArrayToBase64TypeAdapter()); } }
9,620
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/threads/AccumuloUncaughtExceptionHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.threads;

import java.lang.Thread.UncaughtExceptionHandler;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * UncaughtExceptionHandler that logs all Exceptions and Errors thrown from a Thread. If an Error is
 * thrown, halt the JVM.
 *
 */
class AccumuloUncaughtExceptionHandler implements UncaughtExceptionHandler {

  private static final Logger LOG = LoggerFactory.getLogger(AccumuloUncaughtExceptionHandler.class);

  // Returns true if t, any throwable in its cause chain, or any of their suppressed exceptions
  // is an Error. Cause chains are walked iteratively; recursion happens only for suppressed
  // exceptions, bounded by depth.
  private static boolean isError(Throwable t, int depth) {
    if (depth > 32) {
      // This is a peculiar exception. No error has been found, but recursing too deep may cause a
      // stack overflow so going to stop. Err on the side of caution and assume there could be an
      // error since not everything was checked.
      return true;
    }
    while (t != null) {
      if (t instanceof Error) {
        return true;
      }
      for (Throwable suppressed : t.getSuppressed()) {
        if (isError(suppressed, depth + 1)) {
          return true;
        }
      }
      t = t.getCause();
    }
    return false;
  }

  // Entry point for the recursive check above, starting at depth 0.
  static boolean isError(Throwable t) {
    return isError(t, 0);
  }

  @Override
  public void uncaughtException(Thread t, Throwable e) {
    if (isError(e)) {
      try {
        // Write directly to stderr rather than the logger: if this is an OutOfMemoryError the
        // logging framework itself may fail.
        e.printStackTrace();
        System.err.println("Error thrown in thread: " + t + ", halting VM.");
      } catch (Throwable e1) {
        // If e == OutOfMemoryError, then it's probably that another Error might be
        // thrown when trying to print to System.err.
      } finally {
        // halt (not exit): skip shutdown hooks, since the VM state is suspect after an Error
        Runtime.getRuntime().halt(-1);
      }
    } else {
      LOG.error("Caught an Exception in {}. Thread is dead.", t, e);
    }
  }
}
9,621
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/threads/NamedThreadFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.threads;

import java.lang.Thread.UncaughtExceptionHandler;
import java.util.OptionalInt;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * ThreadFactory that sets the name and optionally the priority on a newly created Thread. Thread
 * names follow the pattern {@code <poolName>-<taskName>-<sequence>}.
 */
class NamedThreadFactory implements ThreadFactory {

  private static final String FORMAT = "%s-%s-%d";

  private final AtomicInteger threadNum = new AtomicInteger(1);
  private final String name;
  private final OptionalInt priority;
  private final UncaughtExceptionHandler handler;

  NamedThreadFactory(String name, UncaughtExceptionHandler ueh) {
    this(name, OptionalInt.empty(), ueh);
  }

  NamedThreadFactory(String name, OptionalInt priority, UncaughtExceptionHandler ueh) {
    this.name = name;
    this.priority = priority;
    this.handler = ueh;
  }

  @Override
  public Thread newThread(Runnable r) {
    // NamedRunnable carries an explicit task name; for anything else fall back to the class name.
    String taskName =
        r instanceof NamedRunnable ? ((NamedRunnable) r).getName() : r.getClass().getSimpleName();
    String threadName = String.format(FORMAT, name, taskName, threadNum.getAndIncrement());
    return Threads.createThread(threadName, priority, r, handler);
  }
}
9,622
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/threads/NamedRunnable.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.threads; /** * Runnable implementation that has a name and priority. Used by the NamedThreadFactory when * creating new Threads */ class NamedRunnable implements Runnable { private final String name; private final Runnable r; NamedRunnable(String name, Runnable r) { this.name = name; this.r = r; } public String getName() { return name; } @Override public void run() { r.run(); } }
9,623
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/threads/Threads.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.threads;

import java.lang.Thread.UncaughtExceptionHandler;
import java.util.OptionalInt;

import org.apache.accumulo.core.trace.TraceUtil;

/**
 * Factory methods for creating daemon threads that always have an UncaughtExceptionHandler
 * installed and that propagate trace context into the new thread.
 */
public class Threads {

  // Shared handler that logs uncaught exceptions and halts the VM on Error.
  public static final UncaughtExceptionHandler UEH = new AccumuloUncaughtExceptionHandler();

  /** A Thread that is always a daemon and always has an UncaughtExceptionHandler set. */
  public static class AccumuloDaemonThread extends Thread {

    public AccumuloDaemonThread(Runnable target, String name, UncaughtExceptionHandler ueh) {
      super(target, name);
      setDaemon(true);
      setUncaughtExceptionHandler(ueh);
    }

    public AccumuloDaemonThread(String name) {
      this(name, UEH);
    }

    private AccumuloDaemonThread(String name, UncaughtExceptionHandler ueh) {
      // no Runnable: subclasses of Thread using this form override run() themselves
      super(name);
      setDaemon(true);
      setUncaughtExceptionHandler(ueh);
    }
  }

  /** Wraps a Runnable with a name that NamedThreadFactory uses when naming the thread. */
  public static Runnable createNamedRunnable(String name, Runnable r) {
    return new NamedRunnable(name, r);
  }

  public static Thread createThread(String name, Runnable r) {
    return createThread(name, OptionalInt.empty(), r, UEH);
  }

  public static Thread createThread(String name, OptionalInt priority, Runnable r) {
    return createThread(name, priority, r, UEH);
  }

  /**
   * Creates (but does not start) a named daemon thread.
   *
   * @param name thread name
   * @param priority optional thread priority; left at the default when empty
   * @param r task to run; wrapped so the current trace context follows it onto the new thread
   * @param ueh handler invoked if the thread dies with an uncaught throwable
   */
  public static Thread createThread(String name, OptionalInt priority, Runnable r,
      UncaughtExceptionHandler ueh) {
    Thread thread = new AccumuloDaemonThread(TraceUtil.wrap(r), name, ueh);
    priority.ifPresent(thread::setPriority);
    return thread;
  }
}
9,624
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/threads/ThreadPools.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.util.threads; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import java.lang.Thread.UncaughtExceptionHandler; import java.util.Iterator; import java.util.List; import java.util.OptionalInt; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.IntSupplier; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.metrics.MetricsUtil; import org.apache.accumulo.core.trace.TraceUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; 
@SuppressFBWarnings(value = "RV_EXCEPTION_NOT_THROWN",
    justification = "Throwing Error for it to be caught by AccumuloUncaughtExceptionHandler")
public class ThreadPools {

  /**
   * Error thrown when a critical scheduled background task fails. Thrown (rather than an
   * Exception) so the AccumuloUncaughtExceptionHandler halts the VM.
   */
  public static class ExecutionError extends Error {

    private static final long serialVersionUID = 1L;

    public ExecutionError(String message, Throwable cause) {
      super(message, cause);
    }
  }

  private static final Logger LOG = LoggerFactory.getLogger(ThreadPools.class);

  // idle-thread timeout in milliseconds (180s) before an unused thread is allowed to terminate
  public static final long DEFAULT_TIMEOUT_MILLISECS = 180000L;

  private static final ThreadPools SERVER_INSTANCE = new ThreadPools(Threads.UEH);

  public static final ThreadPools getServerThreadPools() {
    return SERVER_INSTANCE;
  }

  public static final ThreadPools getClientThreadPools(UncaughtExceptionHandler ueh) {
    return new ThreadPools(ueh);
  }

  // single-thread pool that runs TASK_CHECKER forever (see static initializer below)
  private static final ThreadPoolExecutor SCHEDULED_FUTURE_CHECKER_POOL =
      getServerThreadPools().createFixedThreadPool(1, "Scheduled Future Checker", false);

  private static final ConcurrentLinkedQueue<ScheduledFuture<?>> CRITICAL_RUNNING_TASKS =
      new ConcurrentLinkedQueue<>();

  private static final ConcurrentLinkedQueue<ScheduledFuture<?>> NON_CRITICAL_RUNNING_TASKS =
      new ConcurrentLinkedQueue<>();

  // infinite loop: once a minute, sweep both queues and drop (or escalate) finished tasks
  private static Runnable TASK_CHECKER = () -> {
    final List<ConcurrentLinkedQueue<ScheduledFuture<?>>> queues =
        List.of(CRITICAL_RUNNING_TASKS, NON_CRITICAL_RUNNING_TASKS);
    while (true) {
      queues.forEach(q -> {
        Iterator<ScheduledFuture<?>> tasks = q.iterator();
        while (tasks.hasNext()) {
          if (checkTaskFailed(tasks.next(), q)) {
            tasks.remove();
          }
        }
      });
      try {
        TimeUnit.MINUTES.sleep(1);
      } catch (InterruptedException ie) {
        // This thread was interrupted by something while sleeping. We don't want to exit
        // this thread, so reset the interrupt state on this thread and keep going.
        Thread.interrupted();
      }
    }
  };

  /**
   * Checks to see if a ScheduledFuture has exited successfully or thrown an error
   *
   * @param future scheduled future to check
   * @param taskQueue the running task queue from which the future came
   * @return true if the future should be removed
   */
  private static boolean checkTaskFailed(ScheduledFuture<?> future,
      ConcurrentLinkedQueue<ScheduledFuture<?>> taskQueue) {
    // Calling get() on a ScheduledFuture will block unless that scheduled task has
    // completed. We call isDone() here instead. If the scheduled task is done then
    // either it was a one-shot task, cancelled or an exception was thrown.
    if (future.isDone()) {
      // Now call get() to see if we get an exception.
      try {
        future.get();
        // If we get here, then a scheduled task exited but did not throw an error
        // or get canceled. This was likely a one-shot scheduled task (I don't think
        // we can tell if it's one-shot or not, I think we have to assume that it is
        // and that a recurring task would not normally be complete).
        return true;
      } catch (ExecutionException ee) {
        // An exception was thrown in the critical task. Throw the error here, which
        // will then be caught by the AccumuloUncaughtExceptionHandler which will
        // log the error and terminate the VM.
        if (taskQueue == CRITICAL_RUNNING_TASKS) {
          throw new ExecutionError("Critical scheduled background task failed.", ee);
        } else {
          LOG.error("Non-critical scheduled background task failed", ee);
          return true;
        }
      } catch (CancellationException ce) {
        // do nothing here as it appears that the task was canceled. Remove it from
        // the list of critical tasks
        return true;
      } catch (InterruptedException ie) {
        // current thread was interrupted waiting for get to return, which in theory,
        // shouldn't happen since the task is done.
        LOG.info("Interrupted while waiting to check on scheduled background task.");
        // Reset the interrupt state on this thread
        Thread.interrupted();
      }
    }
    return false;
  }

  static {
    // start the perpetual watcher of CRITICAL/NON_CRITICAL task queues
    SCHEDULED_FUTURE_CHECKER_POOL.execute(TASK_CHECKER);
  }

  public static void watchCriticalScheduledTask(ScheduledFuture<?> future) {
    CRITICAL_RUNNING_TASKS.add(future);
  }

  public static void watchCriticalFixedDelay(AccumuloConfiguration aconf, long intervalMillis,
      Runnable runnable) {
    ScheduledFuture<?> future = getServerThreadPools().createGeneralScheduledExecutorService(aconf)
        .scheduleWithFixedDelay(runnable, intervalMillis, intervalMillis, TimeUnit.MILLISECONDS);
    CRITICAL_RUNNING_TASKS.add(future);
  }

  public static void watchNonCriticalScheduledTask(ScheduledFuture<?> future) {
    NON_CRITICAL_RUNNING_TASKS.add(future);
  }

  /**
   * Throws IllegalStateException if the given future has already completed, since the caller
   * expects it to still be running.
   */
  public static void ensureRunning(ScheduledFuture<?> future, String message) {
    if (future.isDone()) {
      try {
        future.get();
      } catch (Exception e) {
        throw new IllegalStateException(message, e);
      }
      // it exited w/o exception, but we still expect it to be running so throw an exception.
      throw new IllegalStateException(message);
    }
  }

  /**
   * Resize ThreadPoolExecutor based on current value of maxThreads
   *
   * @param pool the ThreadPoolExecutor to modify
   * @param maxThreads supplier of maxThreads value
   * @param poolName name of the thread pool
   */
  public static void resizePool(final ThreadPoolExecutor pool, final IntSupplier maxThreads,
      String poolName) {
    int count = pool.getMaximumPoolSize();
    int newCount = maxThreads.getAsInt();
    if (count == newCount) {
      return;
    }
    LOG.info("Changing max threads for {} from {} to {}", poolName, count, newCount);
    if (newCount > count) {
      // increasing, increase the max first, or the core will fail to be increased
      pool.setMaximumPoolSize(newCount);
      pool.setCorePoolSize(newCount);
    } else {
      // decreasing, lower the core size first, or the max will fail to be lowered
      pool.setCorePoolSize(newCount);
      pool.setMaximumPoolSize(newCount);
    }
  }

  /**
   * Resize ThreadPoolExecutor based on current value of Property p
   *
   * @param pool the ThreadPoolExecutor to modify
   * @param conf the AccumuloConfiguration
   * @param p the property to base the size from
   */
  public static void resizePool(final ThreadPoolExecutor pool, final AccumuloConfiguration conf,
      final Property p) {
    resizePool(pool, () -> conf.getCount(p), p.getKey());
  }

  // handler installed on every thread created by this factory instance
  private final UncaughtExceptionHandler handler;

  private ThreadPools(UncaughtExceptionHandler ueh) {
    handler = ueh;
  }

  /**
   * Create a thread pool based on a thread pool related property
   *
   * @param conf accumulo configuration
   * @param p thread pool related property
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ExecutorService impl
   * @throws IllegalArgumentException if property is not handled
   */
  public ThreadPoolExecutor createExecutorService(final AccumuloConfiguration conf,
      final Property p, boolean emitThreadPoolMetrics) {
    switch (p) {
      case GENERAL_THREADPOOL_SIZE:
        return createScheduledExecutorService(conf.getCount(p), "GeneralExecutor",
            emitThreadPoolMetrics);
      case MANAGER_FATE_THREADPOOL_SIZE:
        return createFixedThreadPool(conf.getCount(p), "Repo Runner", emitThreadPoolMetrics);
      case MANAGER_STATUS_THREAD_POOL_SIZE:
        int threads = conf.getCount(p);
        if (threads == 0) {
          // 0 means unbounded: cached-style pool with a SynchronousQueue
          return createThreadPool(0, Integer.MAX_VALUE, 60L, SECONDS, "GatherTableInformation",
              new SynchronousQueue<>(), emitThreadPoolMetrics);
        } else {
          return createFixedThreadPool(threads, "GatherTableInformation", emitThreadPoolMetrics);
        }
      case TSERV_WORKQ_THREADS:
        return createFixedThreadPool(conf.getCount(p), "distributed work queue",
            emitThreadPoolMetrics);
      case TSERV_MINC_MAXCONCURRENT:
        return createFixedThreadPool(conf.getCount(p), 0L, MILLISECONDS, "minor compactor",
            emitThreadPoolMetrics);
      case TSERV_MIGRATE_MAXCONCURRENT:
        return createFixedThreadPool(conf.getCount(p), 0L, MILLISECONDS, "tablet migration",
            emitThreadPoolMetrics);
      case TSERV_ASSIGNMENT_MAXCONCURRENT:
        return createFixedThreadPool(conf.getCount(p), 0L, MILLISECONDS, "tablet assignment",
            emitThreadPoolMetrics);
      case TSERV_SUMMARY_RETRIEVAL_THREADS:
        return createThreadPool(conf.getCount(p), conf.getCount(p), 60, SECONDS,
            "summary file retriever", emitThreadPoolMetrics);
      case TSERV_SUMMARY_REMOTE_THREADS:
        return createThreadPool(conf.getCount(p), conf.getCount(p), 60, SECONDS, "summary remote",
            emitThreadPoolMetrics);
      case TSERV_SUMMARY_PARTITION_THREADS:
        return createThreadPool(conf.getCount(p), conf.getCount(p), 60, SECONDS,
            "summary partition", emitThreadPoolMetrics);
      case GC_DELETE_THREADS:
        return createFixedThreadPool(conf.getCount(p), "deleting", emitThreadPoolMetrics);
      default:
        throw new IllegalArgumentException("Unhandled thread pool property: " + p);
    }
  }

  /**
   * Create a named thread pool
   *
   * @param numThreads number of threads
   * @param name thread pool name
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ThreadPoolExecutor
   */
  public ThreadPoolExecutor createFixedThreadPool(int numThreads, final String name,
      boolean emitThreadPoolMetrics) {
    return createFixedThreadPool(numThreads, DEFAULT_TIMEOUT_MILLISECS, MILLISECONDS, name,
        emitThreadPoolMetrics);
  }

  /**
   * Create a named thread pool
   *
   * @param numThreads number of threads
   * @param name thread pool name
   * @param queue queue to use for tasks
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ThreadPoolExecutor
   */
  public ThreadPoolExecutor createFixedThreadPool(int numThreads, final String name,
      BlockingQueue<Runnable> queue, boolean emitThreadPoolMetrics) {
    return createThreadPool(numThreads, numThreads, DEFAULT_TIMEOUT_MILLISECS, MILLISECONDS, name,
        queue, emitThreadPoolMetrics);
  }

  /**
   * Create a named thread pool
   *
   * @param numThreads number of threads
   * @param timeOut core thread time out
   * @param units core thread time out units
   * @param name thread pool name
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ThreadPoolExecutor
   */
  public ThreadPoolExecutor createFixedThreadPool(int numThreads, long timeOut, TimeUnit units,
      final String name, boolean emitThreadPoolMetrics) {
    return createThreadPool(numThreads, numThreads, timeOut, units, name, emitThreadPoolMetrics);
  }

  /**
   * Create a named thread pool
   *
   * @param coreThreads number of threads
   * @param maxThreads max number of threads
   * @param timeOut core thread time out
   * @param units core thread time out units
   * @param name thread pool name
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ThreadPoolExecutor
   */
  public ThreadPoolExecutor createThreadPool(int coreThreads, int maxThreads, long timeOut,
      TimeUnit units, final String name, boolean emitThreadPoolMetrics) {
    return createThreadPool(coreThreads, maxThreads, timeOut, units, name,
        new LinkedBlockingQueue<>(), emitThreadPoolMetrics);
  }

  /**
   * Create a named thread pool
   *
   * @param coreThreads number of threads
   * @param maxThreads max number of threads
   * @param timeOut core thread time out
   * @param units core thread time out units
   * @param name thread pool name
   * @param queue queue to use for tasks
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ThreadPoolExecutor
   */
  public ThreadPoolExecutor createThreadPool(int coreThreads, int maxThreads, long timeOut,
      TimeUnit units, final String name, BlockingQueue<Runnable> queue,
      boolean emitThreadPoolMetrics) {
    return createThreadPool(coreThreads, maxThreads, timeOut, units, name, queue,
        OptionalInt.empty(), emitThreadPoolMetrics);
  }

  /**
   * Create a named thread pool
   *
   * @param coreThreads number of threads
   * @param maxThreads max number of threads
   * @param timeOut core thread time out
   * @param units core thread time out units
   * @param name thread pool name
   * @param queue queue to use for tasks
   * @param priority thread priority
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ThreadPoolExecutor
   */
  public ThreadPoolExecutor createThreadPool(int coreThreads, int maxThreads, long timeOut,
      TimeUnit units, final String name, BlockingQueue<Runnable> queue, OptionalInt priority,
      boolean emitThreadPoolMetrics) {
    LOG.trace(
        "Creating ThreadPoolExecutor for {} with {} core threads and {} max threads {} {} timeout",
        name, coreThreads, maxThreads, timeOut, units);
    // anonymous subclass: every submitted task is wrapped so trace context propagates to workers
    var result = new ThreadPoolExecutor(coreThreads, maxThreads, timeOut, units, queue,
        new NamedThreadFactory(name, priority, handler)) {

      @Override
      public void execute(Runnable arg0) {
        super.execute(TraceUtil.wrap(arg0));
      }

      @Override
      public boolean remove(Runnable task) {
        return super.remove(TraceUtil.wrap(task));
      }

      @Override
      public <T> Future<T> submit(Callable<T> task) {
        return super.submit(TraceUtil.wrap(task));
      }

      @Override
      public <T> Future<T> submit(Runnable task, T result) {
        return super.submit(TraceUtil.wrap(task), result);
      }

      @Override
      public Future<?> submit(Runnable task) {
        return super.submit(TraceUtil.wrap(task));
      }
    };
    if (timeOut > 0) {
      result.allowCoreThreadTimeOut(true);
    }
    if (emitThreadPoolMetrics) {
      MetricsUtil.addExecutorServiceMetrics(result, name);
    }
    return result;
  }

  /*
   * If you need the server-side shared ScheduledThreadPoolExecutor, then use
   * ServerContext.getScheduledExecutor()
   */
  public ScheduledThreadPoolExecutor
      createGeneralScheduledExecutorService(AccumuloConfiguration conf) {
    return (ScheduledThreadPoolExecutor) createExecutorService(conf,
        Property.GENERAL_THREADPOOL_SIZE, true);
  }

  /**
   * Create a named ScheduledThreadPool
   *
   * @param numThreads number of threads
   * @param name thread pool name
   * @param emitThreadPoolMetrics When set to true will emit metrics and register the metrics in a
   *        static registry. After the thread pool is deleted, there will still be metrics objects
   *        related to it in the static registry. There is no way to clean these left over objects
   *        up therefore its recommended that this option only be set true for long lived thread
   *        pools. Creating lots of short lived thread pools and registering them can lead to out of
   *        memory errors over long time periods.
   * @return ScheduledThreadPoolExecutor
   */
  public ScheduledThreadPoolExecutor createScheduledExecutorService(int numThreads,
      final String name, boolean emitThreadPoolMetrics) {
    LOG.trace("Creating ScheduledThreadPoolExecutor for {} with {} threads", name, numThreads);
    // anonymous subclass: wraps all scheduled/submitted tasks for trace context propagation
    var result = new ScheduledThreadPoolExecutor(numThreads,
        new NamedThreadFactory(name, handler)) {

      @Override
      public void execute(Runnable command) {
        super.execute(TraceUtil.wrap(command));
      }

      @Override
      public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
        return super.schedule(TraceUtil.wrap(callable), delay, unit);
      }

      @Override
      public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
        return super.schedule(TraceUtil.wrap(command), delay, unit);
      }

      @Override
      public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay,
          long period, TimeUnit unit) {
        return super.scheduleAtFixedRate(TraceUtil.wrap(command), initialDelay, period, unit);
      }

      @Override
      public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay,
          long delay, TimeUnit unit) {
        return super.scheduleWithFixedDelay(TraceUtil.wrap(command), initialDelay, delay, unit);
      }

      @Override
      public <T> Future<T> submit(Callable<T> task) {
        return super.submit(TraceUtil.wrap(task));
      }

      @Override
      public <T> Future<T> submit(Runnable task, T result) {
        return super.submit(TraceUtil.wrap(task), result);
      }

      @Override
      public Future<?> submit(Runnable task) {
        return super.submit(TraceUtil.wrap(task));
      }

      @Override
      public boolean remove(Runnable task) {
        return super.remove(TraceUtil.wrap(task));
      }
    };
    if (emitThreadPoolMetrics) {
      MetricsUtil.addExecutorServiceMetrics(result, name);
    }
    return result;
  }
}
9,625
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/FormatterFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.util.Map.Entry;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Static factory for {@link Formatter} instances. Instantiates the requested formatter class
 * reflectively and initializes it against the given scan.
 */
public class FormatterFactory {
  private static final Logger log = LoggerFactory.getLogger(FormatterFactory.class);

  /**
   * Instantiate {@code formatterClass} via its no-arg constructor and initialize it with the
   * given scanner and config. If the class cannot be instantiated for any reason, a warning is
   * logged and a {@link DefaultFormatter} is used instead — the caller always gets a working,
   * initialized formatter.
   */
  public static Formatter getFormatter(Class<? extends Formatter> formatterClass,
      Iterable<Entry<Key,Value>> scanner, FormatterConfig config) {
    Formatter result;
    try {
      result = formatterClass.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      // Deliberately broad: any reflective failure degrades to the default formatter.
      log.warn("Unable to instantiate formatter. Using default formatter.", e);
      result = new DefaultFormatter();
    }
    result.initialize(scanner, config);
    return result;
  }

  /** Shorthand for {@code getFormatter(DefaultFormatter.class, scanner, config)}. */
  public static Formatter getDefaultFormatter(Iterable<Entry<Key,Value>> scanner,
      FormatterConfig config) {
    return getFormatter(DefaultFormatter.class, scanner, config);
  }

  private FormatterFactory() {
    // prevent instantiation
  }
}
9,626
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/ShardedTableDistributionFormatter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

/**
 * Formats the rows in a METADATA table scan to show distribution of shards over servers per day.
 * This can be used to determine the effectiveness of the ShardedTableLoadBalancer
 *
 * Use this formatter with the following scan command in the shell:
 *
 * scan -b tableId -c ~tab:loc
 */
public class ShardedTableDistributionFormatter extends AggregatingFormatter {

  // day (an 8-char yyyyMMdd-style substring of the row) -> distinct server locations seen
  private final Map<String,HashSet<String>> countsByDay = new HashMap<>();

  /**
   * Records the tablet location ({@code ~tab:loc}) of each entry against the day parsed from the
   * entry's row. Rows are expected to look like {@code <tableId>;yyyyMMhh_N}; rows with no
   * semicolon are bucketed under "NULL ".
   */
  @Override
  protected void aggregateStats(Entry<Key,Value> entry) {
    if (entry.getKey().getColumnFamily().toString().equals("~tab")
        && entry.getKey().getColumnQualifier().toString().equals("loc")) {
      // The row for the sharded table should look like: <tableId>;yyyyMMhh_N
      String row = entry.getKey().getRow().toString();
      // Parse the day out of the row
      int semicolon = row.indexOf(";");
      String day;
      if (semicolon == -1) {
        day = "NULL ";
      } else {
        semicolon++;
        day = row.substring(semicolon, semicolon + 8);
      }
      String server = entry.getValue().toString();
      countsByDay.computeIfAbsent(day, k -> new HashSet<>()).add(server);
    }
  }

  /**
   * @return a table of each day seen and the number of distinct servers hosting its shards
   */
  @Override
  protected String getStats() {
    StringBuilder buf = new StringBuilder();
    buf.append("DAY   \t\tSERVERS\n");
    buf.append("------\t\t-------\n");
    // Iterate entries directly rather than keySet() + get() to avoid the redundant lookup.
    for (Entry<String,HashSet<String>> dayEntry : countsByDay.entrySet()) {
      buf.append(dayEntry.getKey()).append("\t\t").append(dayEntry.getValue().size()).append("\n");
    }
    return buf.toString();
  }
}
9,627
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/AggregatingFormatter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.util.Iterator;
import java.util.Map.Entry;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

/**
 * Formatter that will aggregate entries for various display purposes.
 */
public abstract class AggregatingFormatter extends DefaultFormatter {

  /**
   * Drains the entire underlying scan, feeding every entry to {@link #aggregateStats}, then
   * returns the result of {@link #getStats()}. Unlike the per-entry superclass, a single call
   * consumes all remaining entries.
   */
  @Override
  public String next() {
    checkState(true);
    Iterator<Entry<Key,Value>> source = super.getScannerIterator();
    while (source.hasNext()) {
      aggregateStats(source.next());
    }
    return getStats();
  }

  /**
   * Generate statistics from each {@link Entry}, called for each entry to be iterated over.
   *
   * @param next the next entry to aggregate
   */
  protected abstract void aggregateStats(Entry<Key,Value> next);

  /**
   * Finalize the aggregation and return the result. Called once at the end.
   *
   * @return the aggregation results, suitable for printing to the console
   */
  protected abstract String getStats();
}
9,628
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/DefaultFormatter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.text.DateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.Map.Entry;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;

/**
 * Default {@link Formatter}: renders each scan entry as
 * {@code row cf:cq [vis] [timestamp]\tvalue}, escaping non-printable bytes as {@code \xNN}
 * and backslashes as {@code \\}.
 */
public class DefaultFormatter implements Formatter {
  // Iterator over the scan being formatted; null until initialize() is called.
  private Iterator<Entry<Key,Value>> si;
  // Defensive copy of the config passed to initialize().
  protected FormatterConfig config;

  /** Used as default DateFormat for some static methods */
  private static final ThreadLocal<DateFormat> formatter =
      DateFormatSupplier.createDefaultFormatSupplier();

  /**
   * Binds this formatter to a scan. May only be called once; a second call throws
   * IllegalStateException (see checkState).
   */
  @Override
  public void initialize(Iterable<Entry<Key,Value>> scanner, FormatterConfig config) {
    checkState(false);
    si = scanner.iterator();
    this.config = new FormatterConfig(config);
  }

  @Override
  public boolean hasNext() {
    checkState(true);
    return si.hasNext();
  }

  /** Formats and returns the next entry of the scan. */
  @Override
  public String next() {
    checkState(true);
    return formatEntry(si.next());
  }

  @Override
  public void remove() {
    checkState(true);
    si.remove();
  }

  /**
   * Guards the initialized/uninitialized lifecycle: pass {@code true} to require that
   * initialize() has been called, {@code false} to require that it has not.
   */
  protected void checkState(boolean expectInitialized) {
    if (expectInitialized && si == null) {
      throw new IllegalStateException("Not initialized");
    }
    if (!expectInitialized && si != null) {
      throw new IllegalStateException("Already initialized");
    }
  }

  /**
   * if showTimestamps, will use {@link FormatterConfig.DefaultDateFormat}.<br>
   * Preferably, use
   * {@link DefaultFormatter#formatEntry(java.util.Map.Entry, org.apache.accumulo.core.util.format.FormatterConfig)}
   */
  public static String formatEntry(Entry<Key,Value> entry, boolean showTimestamps) {
    DateFormat timestampFormat = null;
    if (showTimestamps) {
      // Thread-local, so the DateFormat is safe to reuse within this thread.
      timestampFormat = formatter.get();
    }
    return formatEntry(entry, timestampFormat);
  }

  /* so a new date object doesn't get created for every record in the scan result */
  private static ThreadLocal<Date> tmpDate = ThreadLocal.withInitial(Date::new);

  /** Does not show timestamps if timestampFormat is null */
  public static String formatEntry(Entry<Key,Value> entry, DateFormat timestampFormat) {
    StringBuilder sb = new StringBuilder();
    Key key = entry.getKey();
    // Single Text buffer reused for row, cf, and cq to avoid three allocations per entry.
    Text buffer = new Text();

    // append row
    appendText(sb, key.getRow(buffer)).append(" ");

    // append column family
    appendText(sb, key.getColumnFamily(buffer)).append(":");

    // append column qualifier
    appendText(sb, key.getColumnQualifier(buffer)).append(" ");

    // append visibility expression
    sb.append(new ColumnVisibility(key.getColumnVisibility(buffer)));

    // append timestamp
    if (timestampFormat != null) {
      tmpDate.get().setTime(entry.getKey().getTimestamp());
      sb.append(" ").append(timestampFormat.format(tmpDate.get()));
    }

    Value value = entry.getValue();

    // append value
    if (value != null && value.getSize() > 0) {
      sb.append("\t");
      appendValue(sb, value);
    }

    return sb.toString();
  }

  /** Formats one entry using this instance's config (length limit, timestamps, date format). */
  public String formatEntry(Entry<Key,Value> entry) {
    return formatEntry(entry, this.config);
  }

  /**
   * Config-driven variant: truncates each field to config's shown length and prints the
   * timestamp using the config's date-format supplier when enabled.
   */
  public static String formatEntry(Entry<Key,Value> entry, FormatterConfig config) {
    // originally from BinaryFormatter
    StringBuilder sb = new StringBuilder();
    Key key = entry.getKey();
    Text buffer = new Text();

    final int shownLength = config.getShownLength();

    appendText(sb, key.getRow(buffer), shownLength).append(" ");
    appendText(sb, key.getColumnFamily(buffer), shownLength).append(":");
    appendText(sb, key.getColumnQualifier(buffer), shownLength).append(" ");
    sb.append(new ColumnVisibility(key.getColumnVisibility(buffer)));

    // append timestamp
    if (config.willPrintTimestamps() && config.getDateFormatSupplier() != null) {
      tmpDate.get().setTime(entry.getKey().getTimestamp());
      sb.append(" ").append(config.getDateFormatSupplier().get().format(tmpDate.get()));
    }

    // append value
    Value value = entry.getValue();
    if (value != null && value.getSize() > 0) {
      sb.append("\t");
      appendValue(sb, value, shownLength);
    }
    return sb.toString();
  }

  static StringBuilder appendText(StringBuilder sb, Text t) {
    return appendBytes(sb, t.getBytes(), 0, t.getLength());
  }

  public static StringBuilder appendText(StringBuilder sb, Text t, int shownLength) {
    return appendBytes(sb, t.getBytes(), 0, t.getLength(), shownLength);
  }

  static StringBuilder appendValue(StringBuilder sb, Value value) {
    return appendBytes(sb, value.get(), 0, value.get().length);
  }

  static StringBuilder appendValue(StringBuilder sb, Value value, int shownLength) {
    return appendBytes(sb, value.get(), 0, value.get().length, shownLength);
  }

  /**
   * Appends len bytes starting at offset, escaping backslash as {@code \\} and any byte outside
   * printable ASCII (32..126) as {@code \xNN}.
   */
  static StringBuilder appendBytes(StringBuilder sb, byte[] ba, int offset, int len) {
    for (int i = 0; i < len; i++) {
      int c = 0xff & ba[offset + i];
      if (c == '\\') {
        sb.append("\\\\");
      } else if (c >= 32 && c <= 126) {
        sb.append((char) c);
      } else {
        sb.append("\\x").append(String.format("%02X", c));
      }
    }
    return sb;
  }

  /** Length-limited variant of appendBytes; appends at most shownLength bytes. */
  static StringBuilder appendBytes(StringBuilder sb, byte[] ba, int offset, int len,
      int shownLength) {
    int length = Math.min(len, shownLength);
    return DefaultFormatter.appendBytes(sb, ba, offset, length);
  }

  public Iterator<Entry<Key,Value>> getScannerIterator() {
    return si;
  }

  protected boolean isDoTimestamps() {
    return config.willPrintTimestamps();
  }
}
9,629
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/DateFormatSupplier.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import java.util.function.Supplier;

/**
 * DateFormatSupplier is a {@code ThreadLocal<DateFormat>} that will set the correct TimeZone when
 * the object is retrieved by {@link #get()}.
 *
 * This exists as a way to get around thread safety issues in {@link DateFormat}. This class also
 * contains helper methods that create some useful DateFormatSuppliers.
 *
 * Instances of DateFormatSuppliers can be shared, but note that a DateFormat generated from it will
 * be shared by all classes within a Thread.
 *
 * In general, the state of a retrieved DateFormat should not be changed, unless it makes sense to
 * only perform a state change within that Thread.
 */
public abstract class DateFormatSupplier extends ThreadLocal<DateFormat>
    implements Supplier<DateFormat> {

  private TimeZone timeZone;

  public DateFormatSupplier() {
    this(TimeZone.getDefault());
  }

  public DateFormatSupplier(TimeZone timeZone) {
    this.timeZone = timeZone;
  }

  public TimeZone getTimeZone() {
    return timeZone;
  }

  public void setTimeZone(TimeZone timeZone) {
    this.timeZone = timeZone;
  }

  /** Always sets the TimeZone, which is a fast operation */
  @Override
  public DateFormat get() {
    // Re-apply the zone on every retrieval so a setTimeZone() call takes effect on
    // DateFormats that were created (per thread) before the change.
    DateFormat threadFormat = super.get();
    threadFormat.setTimeZone(timeZone);
    return threadFormat;
  }

  public static final String HUMAN_READABLE_FORMAT = "yyyy/MM/dd HH:mm:ss.SSS";

  /**
   * Create a Supplier for {@link FormatterConfig.DefaultDateFormat}s
   */
  public static DateFormatSupplier createDefaultFormatSupplier() {
    return new DateFormatSupplier() {
      @Override
      protected DateFormat initialValue() {
        return new FormatterConfig.DefaultDateFormat();
      }
    };
  }

  /** Create a generator for SimpleDateFormats accepting a dateFormat */
  public static DateFormatSupplier createSimpleFormatSupplier(final String dateFormat) {
    return new DateFormatSupplier() {
      @Override
      protected SimpleDateFormat initialValue() {
        return new SimpleDateFormat(dateFormat);
      }
    };
  }

  /** Create a generator for SimpleDateFormats accepting a dateFormat and an explicit TimeZone */
  public static DateFormatSupplier createSimpleFormatSupplier(final String dateFormat,
      final TimeZone timeZone) {
    return new DateFormatSupplier(timeZone) {
      @Override
      protected SimpleDateFormat initialValue() {
        return new SimpleDateFormat(dateFormat);
      }
    };
  }
}
9,630
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/Formatter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.util.Iterator;
import java.util.Map.Entry;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

/**
 * Converts a scan's key/value entries into display strings, one string per call to
 * {@link Iterator#next()}. Implementations are expected to be initialized exactly once before
 * iteration begins.
 */
public interface Formatter extends Iterator<String> {
  /**
   * Bind this formatter to the entries it will format.
   *
   * @param scanner source of entries to format
   * @param config display options (timestamps, field-length limits, date format)
   */
  void initialize(Iterable<Entry<Key,Value>> scanner, FormatterConfig config);
}
9,631
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/FormatterConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import static com.google.common.base.Preconditions.checkArgument;

import java.text.DateFormat;
import java.text.FieldPosition;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.function.Supplier;

/**
 * Holds configuration settings for a {@link Formatter}
 */
public class FormatterConfig {

  private boolean printTimestamps;
  // Max bytes of each field to display; Integer.MAX_VALUE means "no limit".
  private int shownLength;
  private Supplier<DateFormat> dateFormatSupplier;

  /** Formats with milliseconds since epoch */
  public static class DefaultDateFormat extends SimpleDateFormat {
    private static final long serialVersionUID = 1L;

    @Override
    public StringBuffer format(Date date, StringBuffer toAppendTo, FieldPosition fieldPosition) {
      toAppendTo.append(date.getTime());
      return toAppendTo;
    }

    @Override
    public Date parse(String source, ParsePosition pos) {
      // Honor the DateFormat contract: advance the parse position on success, and return null
      // with the error index set on failure. The previous implementation left pos untouched,
      // which made DateFormat.parse(String) throw ParseException even for valid input, and let
      // NumberFormatException escape on malformed input instead of signaling via pos.
      try {
        Date parsed = new Date(Long.parseLong(source.substring(pos.getIndex())));
        pos.setIndex(source.length());
        return parsed;
      } catch (NumberFormatException e) {
        pos.setErrorIndex(pos.getIndex());
        return null;
      }
    }
  }

  public FormatterConfig() {
    this.setPrintTimestamps(false);
    this.doNotLimitShowLength();
    this.dateFormatSupplier = DateFormatSupplier.createDefaultFormatSupplier();
  }

  /**
   * Copies most fields, but still points to other.dateFormatSupplier.
   */
  public FormatterConfig(FormatterConfig other) {
    this.printTimestamps = other.printTimestamps;
    this.shownLength = other.shownLength;
    this.dateFormatSupplier = other.dateFormatSupplier;
  }

  public boolean willPrintTimestamps() {
    return printTimestamps;
  }

  public FormatterConfig setPrintTimestamps(boolean printTimestamps) {
    this.printTimestamps = printTimestamps;
    return this;
  }

  public int getShownLength() {
    return shownLength;
  }

  public boolean willLimitShowLength() {
    return this.shownLength != Integer.MAX_VALUE;
  }

  /**
   * If given a negative number, throws an {@link IllegalArgumentException}
   *
   * @param shownLength maximum length of formatted output
   * @return {@code this} to allow chaining of set methods
   */
  public FormatterConfig setShownLength(int shownLength) {
    checkArgument(shownLength >= 0, "Shown length cannot be negative");
    this.shownLength = shownLength;
    return this;
  }

  public FormatterConfig doNotLimitShowLength() {
    this.shownLength = Integer.MAX_VALUE;
    return this;
  }

  public Supplier<DateFormat> getDateFormatSupplier() {
    return dateFormatSupplier;
  }

  /**
   * this.dateFormatSupplier points to dateFormatSupplier, so it is recommended that you create a
   * new {@code Supplier} when calling this function if your {@code Supplier} maintains some kind of
   * state (see {@link DateFormatSupplier}.
   */
  public FormatterConfig setDateFormatSupplier(Supplier<DateFormat> dateFormatSupplier) {
    this.dateFormatSupplier = dateFormatSupplier;
    return this;
  }
}
9,632
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/format/StatisticsDisplayFormatter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.format;

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

/**
 * Does not show contents from scan, only displays statistics. Beware that this work is being done
 * client side and this was developed as a utility for debugging. If used on large result sets it
 * will likely fail.
 */
public class StatisticsDisplayFormatter extends AggregatingFormatter {
  // Occurrence counts keyed by visibility, column family, and column qualifier.
  private Map<String,Long> classifications = new HashMap<>();
  private Map<String,Long> columnFamilies = new HashMap<>();
  private Map<String,Long> columnQualifiers = new HashMap<>();
  private long total = 0;

  /**
   * Tallies the entry's visibility, column family, and column qualifier.
   */
  @Override
  protected void aggregateStats(Entry<Key,Value> entry) {
    // Map.merge counts the first occurrence as 1. The previous logic
    // ("count != null ? count + 1 : 0L") stored 0 on first sight, so every
    // displayed count was one lower than the true number of occurrences.
    classifications.merge(entry.getKey().getColumnVisibility().toString(), 1L, Long::sum);
    columnFamilies.merge(entry.getKey().getColumnFamily().toString(), 1L, Long::sum);
    columnQualifiers.merge(entry.getKey().getColumnQualifier().toString(), 1L, Long::sum);

    ++total;
  }

  /**
   * Renders the tallies and resets all state so the formatter can aggregate a new batch.
   */
  @Override
  protected String getStats() {
    StringBuilder buf = new StringBuilder();

    buf.append("CLASSIFICATIONS:\n");
    buf.append("----------------\n");
    for (Entry<String,Long> entry : classifications.entrySet()) {
      buf.append("\t").append(entry.getKey()).append(": ").append(entry.getValue()).append("\n");
    }

    buf.append("COLUMN FAMILIES:\n");
    buf.append("----------------\n");
    for (Entry<String,Long> entry : columnFamilies.entrySet()) {
      buf.append("\t").append(entry.getKey()).append(": ").append(entry.getValue()).append("\n");
    }

    buf.append("COLUMN QUALIFIERS:\n");
    buf.append("------------------\n");
    for (Entry<String,Long> entry : columnQualifiers.entrySet()) {
      buf.append("\t").append(entry.getKey()).append(": ").append(entry.getValue()).append("\n");
    }

    buf.append(total).append(" entries matched.");

    // Reset for the next aggregation pass.
    total = 0;
    classifications = new HashMap<>();
    columnFamilies = new HashMap<>();
    columnQualifiers = new HashMap<>();

    return buf.toString();
  }
}
9,633
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/cleaner/CleanerUtil.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.util.cleaner;

import static java.util.Objects.requireNonNull;

import java.lang.ref.Cleaner;
import java.lang.ref.Cleaner.Cleanable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.slf4j.Logger;

/**
 * This class collects all the cleaner actions executed in various parts of the code.
 *
 * <p>
 * These actions replace the use of finalizers, which are deprecated in Java 9 and later, and should
 * be avoided. These actions are triggered by their respective objects when those objects become
 * phantom reachable.
 *
 * <p>
 * In the "unclosed*" methods below, the object should have been closed (implements AutoCloseable).
 * We could possibly consolidate these into a single method which only warns, and doesn't try to
 * clean up. We could also delete them entirely, since it is the caller's responsibility to close
 * AutoCloseable resources, not the object's own responsibility to detect that it wasn't closed.
 */
public class CleanerUtil {

  // Single shared Cleaner (and its daemon thread) for all registrations in this class.
  public static final Cleaner CLEANER = Cleaner.create();

  /**
   * Register an action to warn about caller failing to close an {@link AutoCloseable} object.
   *
   * <p>
   * This task will register a generic action to:
   * <ol>
   * <li>check that the monitored object wasn't closed,
   * <li>log a warning that the monitored object was not closed,
   * <li>attempt to close a resource within the object, and
   * <li>log an error if the resource cannot be closed for any reason
   * </ol>
   *
   * @param obj the object to monitor for becoming phantom-reachable without having been closed
   * @param objClass the class whose simple name will be used in the log message for <code>o</code>
   *        (usually an interface name, rather than the actual impl name of the object)
   * @param closed a flag to check whether <code>o</code> has already been closed
   * @param log the logger to use when emitting error/warn messages
   * @param closeable the resource within <code>o</code> to close when <code>o</code> is cleaned;
   *        must not contain a reference to the <code>monitoredObject</code> or it won't become
   *        phantom-reachable and will never be cleaned
   * @return the registered {@link Cleanable} from {@link Cleaner#register(Object, Runnable)}
   */
  public static Cleanable unclosed(AutoCloseable obj, Class<?> objClass, AtomicBoolean closed,
      Logger log, AutoCloseable closeable) {
    String className = requireNonNull(objClass).getSimpleName();
    requireNonNull(closed);
    requireNonNull(log);
    // Capture the closeable's name eagerly; the lambda below must not touch obj itself.
    String closeableClassName = closeable == null ? null : closeable.getClass().getSimpleName();
    // capture the stack trace during setup for logging later, so user can find unclosed object
    var stackTrace = new Exception();

    // register the action to run when obj becomes phantom-reachable or clean is explicitly called
    return CLEANER.register(obj, () -> {
      if (closed.get()) {
        // already closed; nothing to do
        return;
      }
      log.warn("{} found unreferenced without calling close()", className, stackTrace);
      if (closeable != null) {
        try {
          closeable.close();
        } catch (Exception e1) {
          log.error("{} internal error; exception closing {}", objClass, closeableClassName, e1);
        }
      }
    });
  }

  /**
   * Register an action that warns and force-shuts-down an {@link ExecutorService} if it becomes
   * phantom-reachable without the caller having shut it down (as tracked by {@code closed}).
   *
   * @return the registered {@link Cleanable}
   */
  public static Cleanable shutdownThreadPoolExecutor(ExecutorService pool, AtomicBoolean closed,
      Logger log) {
    requireNonNull(pool);
    requireNonNull(log);
    return CLEANER.register(pool, () -> {
      if (closed.get()) {
        return;
      }
      log.warn("{} found unreferenced without calling shutdown() or shutdownNow()",
          pool.getClass().getSimpleName());
      try {
        pool.shutdownNow();
      } catch (Exception e) {
        log.error("internal error; exception closing {}", pool.getClass().getSimpleName(), e);
      }
    });
  }

  // this done for the BatchWriterIterator test code; I don't trust that pattern, but
  // registering a cleaner is something any user is probably going to have to do to clean up
  // resources used in an iterator, until iterators properly implement their own close()
  public static Cleanable batchWriterAndClientCloser(Object o, Logger log, BatchWriter bw,
      AccumuloClient client) {
    requireNonNull(log);
    requireNonNull(bw);
    requireNonNull(client);
    return CLEANER.register(o, () -> {
      // try-with-resources closes the client even if bw.close() throws
      try (client) {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("Failed to close BatchWriter; some mutations may not be applied", e);
      }
    });
  }

  // this is dubious; MetadataConstraints should probably use the ZooCache provided by context
  // can be done in a follow-on action; for now, this merely replaces the previous finalizer
  public static Cleanable zooCacheClearer(Object o, ZooCache zc) {
    requireNonNull(zc);
    return CLEANER.register(o, zc::clear);
  }
}
9,634
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/FileSKVIterator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file;

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.accumulo.core.file.blockfile.impl.CacheProvider;
import org.apache.accumulo.core.iteratorsImpl.system.InterruptibleIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.hadoop.io.Text;

/**
 * Read-side interface for a file of sorted key/value data. Extends the system iterator interface
 * with file-level operations: first/last row lookups, named metadata streams, sampling, and
 * explicit close.
 */
public interface FileSKVIterator extends InterruptibleIterator, AutoCloseable {
  /** @return the first row in the file */
  Text getFirstRow() throws IOException;

  /** @return the last row in the file */
  Text getLastRow() throws IOException;

  /**
   * Open the named metadata section stored in the file.
   *
   * @throws NoSuchMetaStoreException if no metadata section with that name exists
   */
  DataInputStream getMetaStore(String name) throws IOException, NoSuchMetaStoreException;

  /**
   * @return an iterator over the file's sample data for the given sampler configuration
   *         (semantics when no sample exists are implementation-defined — see implementations)
   */
  FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig);

  /** Close any deep copies made of this iterator. */
  void closeDeepCopies() throws IOException;

  /** Set the provider used to obtain block caches for reads. */
  void setCacheProvider(CacheProvider cacheProvider);

  @Override
  void close() throws IOException;
}
9,635
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/FileSKVWriter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file;

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Set;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

/**
 * A writer that produces a single Accumulo data file from appended key/value pairs, with
 * optional locality groups and named metadata sections.
 */
public interface FileSKVWriter extends AutoCloseable {

  /** Returns true if this file format can store locality groups. */
  boolean supportsLocalityGroups();

  /** Begins a new named locality group holding the given column families. */
  void startNewLocalityGroup(String name, Set<ByteSequence> columnFamilies) throws IOException;

  /** Begins the default locality group. */
  void startDefaultLocalityGroup() throws IOException;

  /**
   * Appends a key/value pair to the file.
   * NOTE(review): presumably keys must be appended in sorted order — confirm with
   * implementations; the contract is not visible in this interface.
   */
  void append(Key key, Value value) throws IOException;

  /** Opens a named metadata section of the file for writing. */
  DataOutputStream createMetaStore(String name) throws IOException;

  /** Finishes the file and releases resources held by this writer. */
  @Override
  void close() throws IOException;

  /** Returns the current length of the file, in bytes. */
  long getLength() throws IOException;
}
9,636
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file; import java.io.IOException; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.file.rfile.RFile; import org.apache.accumulo.core.file.rfile.RFileOperations; import org.apache.accumulo.core.metadata.TabletFile; import org.apache.accumulo.core.summary.SummaryWriter; class DispatchingFileFactory extends FileOperations { private FileOperations findFileFactory(FileOptions options) { TabletFile file = options.getFile(); String name = file.getPath().getName(); String[] sp = name.split("\\."); if (sp.length < 2) { throw new IllegalArgumentException("File name " + name + " has no extension"); } String extension = sp[sp.length - 1]; if (extension.equals(RFile.EXTENSION) || extension.equals(RFile.EXTENSION + "_tmp")) { return new RFileOperations(); } else { throw new IllegalArgumentException("File type " + extension + " not supported"); } } @Override protected long getFileSize(FileOptions options) throws IOException { return findFileFactory(options).getFileSize(options); } @Override protected FileSKVWriter openWriter(FileOptions options) throws IOException { FileOperations fileOps = new RFileOperations(); FileSKVWriter writer = 
fileOps.openWriter(options); if (options.getTableConfiguration().getBoolean(Property.TABLE_BLOOM_ENABLED)) { writer = new BloomFilterLayer.Writer(writer, options.getTableConfiguration(), options.isAccumuloStartEnabled()); } return SummaryWriter.wrap(writer, options.getTableConfiguration(), options.isAccumuloStartEnabled()); } @Override protected FileSKVIterator openIndex(FileOptions options) throws IOException { return findFileFactory(options).openIndex(options); } @Override protected FileSKVIterator openReader(FileOptions options) throws IOException { FileSKVIterator iter = findFileFactory(options).openReader(options); if (options.getTableConfiguration().getBoolean(Property.TABLE_BLOOM_ENABLED)) { return new BloomFilterLayer.Reader(iter, options.getTableConfiguration()); } else { return iter; } } @Override protected FileSKVIterator openScanReader(FileOptions options) throws IOException { return findFileFactory(options).openScanReader(options); } }
9,637
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/FilePrefix.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file;

import java.util.stream.Stream;

/**
 * Single-character prefixes used in Accumulo data file names to record how a file was created:
 * bulk import, minor compaction, or major compaction (full or partial).
 */
public enum FilePrefix {

  BULK_IMPORT("I"),
  MINOR_COMPACTION("F"),
  MAJOR_COMPACTION("C"),
  MAJOR_COMPACTION_ALL_FILES("A");

  // the single-character file name prefix; final so enum constants stay immutable
  final String prefix;

  FilePrefix(String prefix) {
    this.prefix = prefix;
  }

  /**
   * Returns the constant whose prefix equals {@code prefix}.
   *
   * @param prefix single-character prefix string, e.g. "I"
   * @throws IllegalArgumentException if no constant uses the given prefix
   */
  public static FilePrefix fromPrefix(String prefix) {
    return Stream.of(FilePrefix.values()).filter(p -> p.prefix.equals(prefix)).findAny()
        .orElseThrow(() -> new IllegalArgumentException("Unknown prefix type: " + prefix));
  }

  /** Returns the single-character prefix string for this constant. */
  public String toPrefix() {
    return this.prefix;
  }
}
9,638
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file;

import static org.apache.accumulo.core.file.blockfile.impl.CacheProvider.NULL_PROVIDER;

import java.io.IOException;
import java.util.Objects;
import java.util.Set;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.file.blockfile.impl.CacheProvider;
import org.apache.accumulo.core.file.rfile.RFile;
import org.apache.accumulo.core.metadata.TabletFile;
import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputCommitter;

import com.github.benmanes.caffeine.cache.Cache;

/**
 * Abstract factory for opening readers and writers of Accumulo data files. Obtain an instance
 * via {@link #getInstance()} and configure an operation through one of the fluent builders
 * ({@link #newWriterBuilder()}, {@link #newReaderBuilder()}, {@link #newIndexReaderBuilder()},
 * {@link #newScanReaderBuilder()}); each builder collects its settings into a {@link FileOptions}
 * that is handed to the subclass's protected open/size method.
 */
public abstract class FileOperations {

  // dir related to hadoop.job.history.user.location
  private static final String HADOOP_JOBHISTORY_LOCATION = "_logs";

  // file name extensions recognized as Accumulo data files
  private static final Set<String> validExtensions = Set.of(RFile.EXTENSION);

  // Sometimes we want to know what files accumulo bulk processing creates
  private static final Set<String> bulkWorkingFiles =
      Set.of(Constants.BULK_LOAD_MAPPING, Constants.BULK_RENAME_FILE,
          FileOutputCommitter.SUCCEEDED_FILE_NAME, HADOOP_JOBHISTORY_LOCATION);

  /** Returns the set of file name extensions recognized as Accumulo data files. */
  public static Set<String> getValidExtensions() {
    return validExtensions;
  }

  /** Returns the set of working-file names that bulk processing may create. */
  public static Set<String> getBulkWorkingFiles() {
    return bulkWorkingFiles;
  }

  /** Returns the configured file type ({@link Property#TABLE_FILE_TYPE}) for new files. */
  public static String getNewFileExtension(AccumuloConfiguration acuconf) {
    return acuconf.get(Property.TABLE_FILE_TYPE);
  }

  /** Returns a {@code FileOperations} that dispatches on each file's type. */
  public static FileOperations getInstance() {
    return new DispatchingFileFactory();
  }

  //
  // Abstract methods (to be implemented by subclasses)
  //

  /** Returns the length, in bytes, of the file described by {@code options}. */
  protected abstract long getFileSize(FileOptions options) throws IOException;

  /** Opens a writer for the file described by {@code options}. */
  protected abstract FileSKVWriter openWriter(FileOptions options) throws IOException;

  /** Opens an iterator over the index of the file described by {@code options}. */
  protected abstract FileSKVIterator openIndex(FileOptions options) throws IOException;

  /** Opens a scan-only (non-seeking) reader for the file described by {@code options}. */
  protected abstract FileSKVIterator openScanReader(FileOptions options) throws IOException;

  /** Opens a full, seek-capable reader for the file described by {@code options}. */
  protected abstract FileSKVIterator openReader(FileOptions options) throws IOException;

  //
  // File operations
  //

  /**
   * Construct an operation object allowing one to create a writer for a file. <br>
   * Syntax:
   *
   * <pre>
   * FileSKVWriter writer = fileOperations.newWriterBuilder()
   *     .forFile(...)
   *     .withTableConfiguration(...)
   *     .withRateLimiter(...) // optional
   *     .withCompression(...) // optional
   *     .build();
   * </pre>
   */
  public WriterBuilder newWriterBuilder() {
    return new WriterBuilder();
  }

  /**
   * Construct an operation object allowing one to create an index iterator for a file. <br>
   * Syntax:
   *
   * <pre>
   * FileSKVIterator iterator = fileOperations.newIndexReaderBuilder()
   *     .forFile(...)
   *     .withTableConfiguration(...)
   *     .withRateLimiter(...) // optional
   *     .withBlockCache(...) // optional
   *     .build();
   * </pre>
   */
  public IndexReaderBuilder newIndexReaderBuilder() {
    return new IndexReaderBuilder();
  }

  /**
   * Construct an operation object allowing one to create a "scan" reader for a file. Scan readers
   * do not have any optimizations for seeking beyond their initial position. This is useful for
   * file operations that only need to scan data within a range and do not need to seek. Therefore
   * file metadata such as indexes does not need to be kept in memory while the file is scanned.
   * Also seek optimizations like bloom filters do not need to be loaded. <br>
   * Syntax:
   *
   * <pre>
   * FileSKVIterator scanner = fileOperations.newScanReaderBuilder()
   *     .forFile(...)
   *     .withTableConfiguration(...)
   *     .overRange(...)
   *     .withRateLimiter(...) // optional
   *     .withBlockCache(...) // optional
   *     .build();
   * </pre>
   */
  public ScanReaderBuilder newScanReaderBuilder() {
    return new ScanReaderBuilder();
  }

  /**
   * Construct an operation object allowing one to create a reader for a file. A reader constructed
   * in this manner fully supports seeking, and also enables any optimizations related to seeking
   * (e.g. Bloom filters). <br>
   * Syntax:
   *
   * <pre>
   * FileSKVIterator scanner = fileOperations.newReaderBuilder()
   *     .forFile(...)
   *     .withTableConfiguration(...)
   *     .withRateLimiter(...) // optional
   *     .withBlockCache(...) // optional
   *     .seekToBeginning(...) // optional
   *     .build();
   * </pre>
   */
  public ReaderBuilder newReaderBuilder() {
    return new ReaderBuilder();
  }

  /**
   * Immutable bag of settings assembled by the builders and consumed by the protected
   * open/size methods. Only the fields relevant to the requested operation are populated;
   * the rest carry the defaults chosen by the {@code to*BuilderOptions} methods below.
   */
  protected static class FileOptions {
    // objects used by all
    public final AccumuloConfiguration tableConfiguration;
    public final TabletFile file;
    public final FileSystem fs;
    public final Configuration fsConf;
    public final RateLimiter rateLimiter;
    // writer only objects
    public final String compression;
    public final FSDataOutputStream outputStream;
    public final boolean enableAccumuloStart;
    // reader only objects
    public final CacheProvider cacheProvider;
    public final Cache<String,Long> fileLenCache;
    public final boolean seekToBeginning;
    public final CryptoService cryptoService;
    // scan reader only objects
    public final Range range;
    public final Set<ByteSequence> columnFamilies;
    public final boolean inclusive;
    public final boolean dropCacheBehind;

    protected FileOptions(AccumuloConfiguration tableConfiguration, TabletFile file, FileSystem fs,
        Configuration fsConf, RateLimiter rateLimiter, String compression,
        FSDataOutputStream outputStream, boolean enableAccumuloStart, CacheProvider cacheProvider,
        Cache<String,Long> fileLenCache, boolean seekToBeginning, CryptoService cryptoService,
        Range range, Set<ByteSequence> columnFamilies, boolean inclusive,
        boolean dropCacheBehind) {
      this.tableConfiguration = tableConfiguration;
      // file and cryptoService are required for every operation
      this.file = Objects.requireNonNull(file);
      this.fs = fs;
      this.fsConf = fsConf;
      this.rateLimiter = rateLimiter;
      this.compression = compression;
      this.outputStream = outputStream;
      this.enableAccumuloStart = enableAccumuloStart;
      this.cacheProvider = cacheProvider;
      this.fileLenCache = fileLenCache;
      this.seekToBeginning = seekToBeginning;
      this.cryptoService = Objects.requireNonNull(cryptoService);
      this.range = range;
      this.columnFamilies = columnFamilies;
      this.inclusive = inclusive;
      this.dropCacheBehind = dropCacheBehind;
    }

    public AccumuloConfiguration getTableConfiguration() {
      return tableConfiguration;
    }

    public TabletFile getFile() {
      return file;
    }

    public FileSystem getFileSystem() {
      return fs;
    }

    public Configuration getConfiguration() {
      return fsConf;
    }

    public RateLimiter getRateLimiter() {
      return rateLimiter;
    }

    public String getCompression() {
      return compression;
    }

    public FSDataOutputStream getOutputStream() {
      return outputStream;
    }

    public boolean isAccumuloStartEnabled() {
      return enableAccumuloStart;
    }

    public CacheProvider getCacheProvider() {
      return cacheProvider;
    }

    public Cache<String,Long> getFileLenCache() {
      return fileLenCache;
    }

    public boolean isSeekToBeginning() {
      return seekToBeginning;
    }

    public CryptoService getCryptoService() {
      return cryptoService;
    }

    public Range getRange() {
      return range;
    }

    public Set<ByteSequence> getColumnFamilies() {
      return columnFamilies;
    }

    public boolean isRangeInclusive() {
      return inclusive;
    }
  }

  /**
   * Helper class extended by both writers and readers. Accumulates the settings shared by all
   * builders and converts them into a {@link FileOptions} for the requested operation kind.
   */
  public static class FileHelper {
    private AccumuloConfiguration tableConfiguration;
    private TabletFile file;
    private FileSystem fs;
    private Configuration fsConf;
    private RateLimiter rateLimiter;
    private CryptoService cryptoService;
    private boolean dropCacheBehind = false;

    protected FileHelper fs(FileSystem fs) {
      this.fs = Objects.requireNonNull(fs);
      return this;
    }

    protected FileHelper fsConf(Configuration fsConf) {
      this.fsConf = Objects.requireNonNull(fsConf);
      return this;
    }

    protected FileHelper file(TabletFile file) {
      this.file = Objects.requireNonNull(file);
      return this;
    }

    protected FileHelper tableConfiguration(AccumuloConfiguration tableConfiguration) {
      this.tableConfiguration = Objects.requireNonNull(tableConfiguration);
      return this;
    }

    protected FileHelper rateLimiter(RateLimiter rateLimiter) {
      // rate limiter is optional, so null is allowed here
      this.rateLimiter = rateLimiter;
      return this;
    }

    protected FileHelper cryptoService(CryptoService cs) {
      this.cryptoService = Objects.requireNonNull(cs);
      return this;
    }

    protected FileHelper dropCacheBehind(boolean drop) {
      this.dropCacheBehind = drop;
      return this;
    }

    protected FileOptions toWriterBuilderOptions(String compression,
        FSDataOutputStream outputStream, boolean startEnabled) {
      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, compression,
          outputStream, startEnabled, NULL_PROVIDER, null, false, cryptoService, null, null, true,
          dropCacheBehind);
    }

    protected FileOptions toReaderBuilderOptions(CacheProvider cacheProvider,
        Cache<String,Long> fileLenCache, boolean seekToBeginning) {
      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, null, null, false,
          cacheProvider == null ? NULL_PROVIDER : cacheProvider, fileLenCache, seekToBeginning,
          cryptoService, null, null, true, dropCacheBehind);
    }

    protected FileOptions toIndexReaderBuilderOptions(Cache<String,Long> fileLenCache) {
      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, null, null, false,
          NULL_PROVIDER, fileLenCache, false, cryptoService, null, null, true, dropCacheBehind);
    }

    protected FileOptions toScanReaderBuilderOptions(Range range, Set<ByteSequence> columnFamilies,
        boolean inclusive) {
      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, null, null, false,
          NULL_PROVIDER, null, false, cryptoService, range, columnFamilies, inclusive,
          dropCacheBehind);
    }

    protected AccumuloConfiguration getTableConfiguration() {
      return tableConfiguration;
    }
  }

  /**
   * Operation object for constructing a writer.
   */
  public class WriterBuilder extends FileHelper implements WriterTableConfiguration {
    private String compression;
    private FSDataOutputStream outputStream;
    private boolean enableAccumuloStart = true;

    public WriterTableConfiguration forOutputStream(String extension,
        FSDataOutputStream outputStream, Configuration fsConf, CryptoService cs)
        throws IOException {
      this.outputStream = outputStream;
      // placeholder path: only the extension matters when writing to a provided stream
      file(UnreferencedTabletFile.of(fsConf, new Path("foo/foo" + extension))).fsConf(fsConf)
          .cryptoService(cs);
      return this;
    }

    public WriterTableConfiguration forFile(TabletFile file, FileSystem fs, Configuration fsConf,
        CryptoService cs) {
      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
      return this;
    }

    @Override
    public WriterBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration) {
      tableConfiguration(tableConfiguration);
      return this;
    }

    public WriterBuilder withStartDisabled() {
      this.enableAccumuloStart = false;
      return this;
    }

    public WriterBuilder withCompression(String compression) {
      this.compression = compression;
      return this;
    }

    public WriterBuilder withRateLimiter(RateLimiter rateLimiter) {
      rateLimiter(rateLimiter);
      return this;
    }

    public WriterBuilder dropCachesBehind() {
      this.dropCacheBehind(true);
      return this;
    }

    /** Execute the operation, constructing the configured writer. */
    public FileSKVWriter build() throws IOException {
      return openWriter(toWriterBuilderOptions(compression, outputStream, enableAccumuloStart));
    }
  }

  public interface WriterTableConfiguration {
    public WriterBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration);
  }

  /**
   * Options common to all {@code FileOperations} which perform reads.
   */
  public class ReaderBuilder extends FileHelper implements ReaderTableConfiguration {
    private CacheProvider cacheProvider;
    private Cache<String,Long> fileLenCache;
    private boolean seekToBeginning = false;

    public ReaderTableConfiguration forFile(TabletFile file, FileSystem fs, Configuration fsConf,
        CryptoService cs) {
      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
      return this;
    }

    @Override
    public ReaderBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration) {
      tableConfiguration(tableConfiguration);
      return this;
    }

    /**
     * (Optional) Set the block cache pair to be used to optimize reads within the constructed
     * reader.
     */
    public ReaderBuilder withCacheProvider(CacheProvider cacheProvider) {
      this.cacheProvider = cacheProvider;
      return this;
    }

    public ReaderBuilder withFileLenCache(Cache<String,Long> fileLenCache) {
      this.fileLenCache = fileLenCache;
      return this;
    }

    public ReaderBuilder withRateLimiter(RateLimiter rateLimiter) {
      rateLimiter(rateLimiter);
      return this;
    }

    public ReaderBuilder dropCachesBehind() {
      this.dropCacheBehind(true);
      return this;
    }

    /**
     * Seek the constructed iterator to the beginning of its domain before returning. Equivalent to
     * {@code seekToBeginning(true)}.
     */
    public ReaderBuilder seekToBeginning() {
      seekToBeginning(true);
      return this;
    }

    /** If true, seek the constructed iterator to the beginning of its domain before returning. */
    public ReaderBuilder seekToBeginning(boolean seekToBeginning) {
      this.seekToBeginning = seekToBeginning;
      return this;
    }

    /** Execute the operation, constructing the specified file reader. */
    public FileSKVIterator build() throws IOException {
      return openReader(toReaderBuilderOptions(cacheProvider, fileLenCache, seekToBeginning));
    }
  }

  public interface ReaderTableConfiguration {
    ReaderBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration);
  }

  /**
   * Operation object for opening an index.
   */
  public class IndexReaderBuilder extends FileHelper implements IndexReaderTableConfiguration {
    private Cache<String,Long> fileLenCache = null;

    public IndexReaderTableConfiguration forFile(TabletFile file, FileSystem fs,
        Configuration fsConf, CryptoService cs) {
      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
      return this;
    }

    @Override
    public IndexReaderBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration) {
      tableConfiguration(tableConfiguration);
      return this;
    }

    public IndexReaderBuilder withFileLenCache(Cache<String,Long> fileLenCache) {
      this.fileLenCache = fileLenCache;
      return this;
    }

    /** Execute the operation, constructing an index iterator. */
    public FileSKVIterator build() throws IOException {
      return openIndex(toIndexReaderBuilderOptions(fileLenCache));
    }
  }

  public interface IndexReaderTableConfiguration {
    IndexReaderBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration);
  }

  /** Operation object for opening a scan reader. */
  public class ScanReaderBuilder extends FileHelper implements ScanReaderTableConfiguration {
    private Range range;
    private Set<ByteSequence> columnFamilies;
    private boolean inclusive;

    public ScanReaderTableConfiguration forFile(TabletFile file, FileSystem fs,
        Configuration fsConf, CryptoService cs) {
      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
      return this;
    }

    @Override
    public ScanReaderBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration) {
      tableConfiguration(tableConfiguration);
      return this;
    }

    /** Set the range over which the constructed iterator will search. */
    public ScanReaderBuilder overRange(Range range, Set<ByteSequence> columnFamilies,
        boolean inclusive) {
      Objects.requireNonNull(range);
      Objects.requireNonNull(columnFamilies);
      this.range = range;
      this.columnFamilies = columnFamilies;
      this.inclusive = inclusive;
      return this;
    }

    /** Execute the operation, constructing a scan iterator. */
    public FileSKVIterator build() throws IOException {
      return openScanReader(toScanReaderBuilderOptions(range, columnFamilies, inclusive));
    }
  }

  public interface ScanReaderTableConfiguration {
    ScanReaderBuilder withTableConfiguration(AccumuloConfiguration tableConfiguration);
  }
}
9,639
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file; import static java.util.concurrent.TimeUnit.SECONDS; import static org.apache.accumulo.core.util.LazySingletons.RANDOM; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.accumulo.core.bloomfilter.DynamicBloomFilter; import org.apache.accumulo.core.classloader.ClassLoaderUtil; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.ConfigurationCopy; import org.apache.accumulo.core.conf.DefaultConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.PartialKey; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.file.blockfile.impl.CacheProvider; import org.apache.accumulo.core.file.keyfunctor.KeyFunctor; import 
org.apache.accumulo.core.file.rfile.RFile; import org.apache.accumulo.core.iterators.IteratorEnvironment; import org.apache.accumulo.core.iterators.SortedKeyValueIterator; import org.apache.accumulo.core.metadata.ReferencedTabletFile; import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl; import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory; import org.apache.accumulo.core.util.threads.ThreadPools; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.hadoop.util.bloom.Key; import org.apache.hadoop.util.hash.Hash; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * A class that sits on top of different accumulo file formats and provides bloom filter * functionality. */ public class BloomFilterLayer { private static final Logger LOG = LoggerFactory.getLogger(BloomFilterLayer.class); public static final String BLOOM_FILE_NAME = "acu_bloom"; public static final int HASH_COUNT = 5; private static ExecutorService loadThreadPool = null; private static synchronized ExecutorService getLoadThreadPool(int maxLoadThreads) { if (loadThreadPool != null) { return loadThreadPool; } if (maxLoadThreads > 0) { loadThreadPool = ThreadPools.getServerThreadPools().createThreadPool(0, maxLoadThreads, 60, SECONDS, "bloom-loader", false); } return loadThreadPool; } public static class Writer implements FileSKVWriter { private DynamicBloomFilter bloomFilter; private int numKeys; private int vectorSize; private FileSKVWriter writer; private KeyFunctor transformer = null; private boolean closed = false; private long length = -1; Writer(FileSKVWriter writer, AccumuloConfiguration acuconf, boolean useAccumuloStart) { this.writer = writer; initBloomFilter(acuconf, useAccumuloStart); } private synchronized void initBloomFilter(AccumuloConfiguration acuconf, boolean useAccumuloStart) { numKeys = acuconf.getCount(Property.TABLE_BLOOM_SIZE); // 
vector size should be <code>-kn / (ln(1 - c^(1/k)))</code> bits for // single key, where <code> is the number of hash functions, // <code>n</code> is the number of keys and <code>c</code> is the desired // max. error rate. // Our desired error rate is by default 0.005, i.e. 0.5% double errorRate = acuconf.getFraction(Property.TABLE_BLOOM_ERRORRATE); vectorSize = (int) Math .ceil(-HASH_COUNT * numKeys / Math.log(1.0 - Math.pow(errorRate, 1.0 / HASH_COUNT))); bloomFilter = new DynamicBloomFilter(vectorSize, HASH_COUNT, Hash.parseHashType(acuconf.get(Property.TABLE_BLOOM_HASHTYPE)), numKeys); /** * load KeyFunctor */ try { String context = ClassLoaderUtil.tableContext(acuconf); String classname = acuconf.get(Property.TABLE_BLOOM_KEY_FUNCTOR); Class<? extends KeyFunctor> clazz; if (!useAccumuloStart) { clazz = Writer.class.getClassLoader().loadClass(classname).asSubclass(KeyFunctor.class); } else { clazz = ClassLoaderUtil.loadClass(context, classname, KeyFunctor.class); } transformer = clazz.getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.error("Failed to find KeyFunctor: " + acuconf.get(Property.TABLE_BLOOM_KEY_FUNCTOR), e); throw new IllegalArgumentException( "Failed to find KeyFunctor: " + acuconf.get(Property.TABLE_BLOOM_KEY_FUNCTOR)); } } @Override public synchronized void append(org.apache.accumulo.core.data.Key key, Value val) throws IOException { writer.append(key, val); Key bloomKey = transformer.transform(key); if (bloomKey.getBytes().length > 0) { bloomFilter.add(bloomKey); } } @Override public synchronized void close() throws IOException { if (closed) { return; } DataOutputStream out = writer.createMetaStore(BLOOM_FILE_NAME); out.writeUTF(transformer.getClass().getName()); bloomFilter.write(out); out.flush(); out.close(); writer.close(); length = writer.getLength(); closed = true; } @Override public DataOutputStream createMetaStore(String name) throws IOException { return writer.createMetaStore(name); } @Override public void 
startDefaultLocalityGroup() throws IOException { writer.startDefaultLocalityGroup(); } @Override public void startNewLocalityGroup(String name, Set<ByteSequence> columnFamilies) throws IOException { writer.startNewLocalityGroup(name, columnFamilies); } @Override public boolean supportsLocalityGroups() { return writer.supportsLocalityGroups(); } @Override public long getLength() throws IOException { if (closed) { return length; } return writer.getLength(); } } static class BloomFilterLoader { private volatile DynamicBloomFilter bloomFilter; private int loadRequest = 0; private int loadThreshold = 1; private int maxLoadThreads; private Runnable loadTask; private volatile KeyFunctor transformer = null; private volatile boolean closed = false; BloomFilterLoader(final FileSKVIterator reader, AccumuloConfiguration acuconf) { maxLoadThreads = acuconf.getCount(Property.TSERV_BLOOM_LOAD_MAXCONCURRENT); loadThreshold = acuconf.getCount(Property.TABLE_BLOOM_LOAD_THRESHOLD); final String context = ClassLoaderUtil.tableContext(acuconf); loadTask = () -> { // no need to load the bloom filter if the data file is closed if (closed) { return; } String ClassName = null; DataInputStream in = null; try { in = reader.getMetaStore(BLOOM_FILE_NAME); DynamicBloomFilter tmpBloomFilter = new DynamicBloomFilter(); // check for closed again after open but before reading the bloom filter in if (closed) { return; } /** * Load classname for keyFunctor */ ClassName = in.readUTF(); Class<? 
extends KeyFunctor> clazz = ClassLoaderUtil.loadClass(context, ClassName, KeyFunctor.class); transformer = clazz.getDeclaredConstructor().newInstance(); /** * read in bloom filter */ tmpBloomFilter.readFields(in); // only set the bloom filter after it is fully constructed bloomFilter = tmpBloomFilter; } catch (NoSuchMetaStoreException nsme) { // file does not have a bloom filter, ignore it } catch (IOException ioe) { if (closed) { LOG.debug("Can't open BloomFilter, file closed : {}", ioe.getMessage()); } else { LOG.warn("Can't open BloomFilter", ioe); } bloomFilter = null; } catch (ClassNotFoundException e) { LOG.error("Failed to find KeyFunctor in config: " + sanitize(ClassName), e); bloomFilter = null; } catch (ReflectiveOperationException e) { LOG.error("Could not instantiate KeyFunctor: " + sanitize(ClassName), e); bloomFilter = null; } catch (RuntimeException rte) { if (closed) { LOG.debug("Can't open BloomFilter, RTE after closed ", rte); } else { throw rte; } } finally { if (in != null) { try { in.close(); } catch (IOException e) { LOG.warn("Failed to close ", e); } } } }; initiateLoad(maxLoadThreads); } /** * Prevent potential CRLF injection into logs from read in user data. 
See the * <a href="https://find-sec-bugs.github.io/bugs.htm#CRLF_INJECTION_LOGS">bug description</a> */ private String sanitize(String msg) { return msg.replaceAll("[\r\n]", ""); } private synchronized void initiateLoad(int maxLoadThreads) { // ensure only one thread initiates loading of bloom filter by // only taking action when loadTask != null if (loadTask != null && loadRequest >= loadThreshold) { try { ExecutorService ltp = getLoadThreadPool(maxLoadThreads); if (ltp == null) { // load the bloom filter in the foreground loadTask.run(); } else { // load the bloom filter in the background ltp.execute(loadTask); } } finally { // set load task to null so no one else can initiate the load loadTask = null; } } loadRequest++; } /** * Checks if this {@link RFile} contains keys from this range. The membership test is performed * using a Bloom filter, so the result has always non-zero probability of false positives. * * @param range range of keys to check * @return false iff key doesn't exist, true if key probably exists. */ boolean probablyHasKey(Range range) { if (bloomFilter == null) { initiateLoad(maxLoadThreads); if (bloomFilter == null) { return true; } } Key bloomKey = transformer.transform(range); if (bloomKey == null || bloomKey.getBytes().length == 0) { return true; } return bloomFilter.membershipTest(bloomKey); } public void close() { this.closed = true; } } public static class Reader implements FileSKVIterator { private BloomFilterLoader bfl; private FileSKVIterator reader; public Reader(FileSKVIterator reader, AccumuloConfiguration acuconf) { this.reader = reader; bfl = new BloomFilterLoader(reader, acuconf); } private Reader(FileSKVIterator src, BloomFilterLoader bfl) { this.reader = src; this.bfl = bfl; } private boolean checkSuper = true; @Override public boolean hasTop() { return checkSuper ? 
reader.hasTop() : false; } @Override public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException { if (bfl.probablyHasKey(range)) { reader.seek(range, columnFamilies, inclusive); checkSuper = true; } else { checkSuper = false; } } @Override public synchronized void close() throws IOException { bfl.close(); reader.close(); } @Override public Text getFirstRow() throws IOException { return reader.getFirstRow(); } @Override public Text getLastRow() throws IOException { return reader.getLastRow(); } @Override public SortedKeyValueIterator<org.apache.accumulo.core.data.Key,Value> deepCopy(IteratorEnvironment env) { return new BloomFilterLayer.Reader((FileSKVIterator) reader.deepCopy(env), bfl); } @Override public org.apache.accumulo.core.data.Key getTopKey() { return reader.getTopKey(); } @Override public Value getTopValue() { return reader.getTopValue(); } @Override public void init(SortedKeyValueIterator<org.apache.accumulo.core.data.Key,Value> source, Map<String,String> options, IteratorEnvironment env) { throw new UnsupportedOperationException(); } @Override public void next() throws IOException { reader.next(); } @Override public DataInputStream getMetaStore(String name) throws IOException { return reader.getMetaStore(name); } @Override public void closeDeepCopies() throws IOException { reader.closeDeepCopies(); } @Override public void setInterruptFlag(AtomicBoolean flag) { reader.setInterruptFlag(flag); } @Override public FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig) { return new BloomFilterLayer.Reader(reader.getSample(sampleConfig), bfl); } @Override public void setCacheProvider(CacheProvider cacheProvider) { reader.setCacheProvider(cacheProvider); } } public static void main(String[] args) throws IOException { PrintStream out = System.out; HashSet<Integer> valsSet = new HashSet<>(); for (int i = 0; i < 100000; i++) { valsSet.add(RANDOM.get().nextInt(Integer.MAX_VALUE)); } ArrayList<Integer> vals 
= new ArrayList<>(valsSet); Collections.sort(vals); ConfigurationCopy acuconf = new ConfigurationCopy(DefaultConfiguration.getInstance()); acuconf.set(Property.TABLE_BLOOM_ENABLED, "true"); acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR, "accumulo.core.file.keyfunctor.ColumnFamilyFunctor"); acuconf.set(Property.TABLE_FILE_TYPE, RFile.EXTENSION); acuconf.set(Property.TABLE_BLOOM_LOAD_THRESHOLD, "1"); acuconf.set(Property.TSERV_BLOOM_LOAD_MAXCONCURRENT, "1"); Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); String suffix = FileOperations.getNewFileExtension(acuconf); String fname = "/tmp/test." + suffix; FileSKVWriter bmfw = FileOperations.getInstance().newWriterBuilder() .forFile(new ReferencedTabletFile(new Path(fname)), fs, conf, NoCryptoServiceFactory.NONE) .withTableConfiguration(acuconf).build(); long t1 = System.currentTimeMillis(); bmfw.startDefaultLocalityGroup(); for (Integer i : vals) { String fi = String.format("%010d", i); bmfw.append(new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf1")), new Value("v" + fi)); bmfw.append(new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf2")), new Value("v" + fi)); } long t2 = System.currentTimeMillis(); out.printf("write rate %6.2f%n", vals.size() / ((t2 - t1) / 1000.0)); bmfw.close(); t1 = System.currentTimeMillis(); FileSKVIterator bmfr = FileOperations.getInstance().newReaderBuilder() .forFile(new ReferencedTabletFile(new Path(fname)), fs, conf, NoCryptoServiceFactory.NONE) .withTableConfiguration(acuconf).build(); t2 = System.currentTimeMillis(); out.println("Opened " + fname + " in " + (t2 - t1)); t1 = System.currentTimeMillis(); int hits = 0; for (int i = 0; i < 5000; i++) { int row = RANDOM.get().nextInt(Integer.MAX_VALUE); String fi = String.format("%010d", row); // bmfr.seek(new Range(new Text("r"+fi))); org.apache.accumulo.core.data.Key k1 = new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf1")); bmfr.seek(new Range(k1, 
true, k1.followingKey(PartialKey.ROW_COLFAM), false), new ArrayList<>(), false); if (valsSet.contains(row)) { hits++; if (!bmfr.hasTop()) { out.println("ERROR " + row); } } } t2 = System.currentTimeMillis(); out.printf("random lookup rate : %6.2f%n", 5000 / ((t2 - t1) / 1000.0)); out.println("hits = " + hits); int count = 0; t1 = System.currentTimeMillis(); for (Integer row : valsSet) { String fi = String.format("%010d", row); // bmfr.seek(new Range(new Text("r"+fi))); org.apache.accumulo.core.data.Key k1 = new org.apache.accumulo.core.data.Key(new Text("r" + fi), new Text("cf1")); bmfr.seek(new Range(k1, true, k1.followingKey(PartialKey.ROW_COLFAM), false), new ArrayList<>(), false); if (!bmfr.hasTop()) { out.println("ERROR 2 " + row); } count++; if (count >= 500) { break; } } t2 = System.currentTimeMillis(); out.printf("existing lookup rate %6.2f%n", 500 / ((t2 - t1) / 1000.0)); out.println("expected hits 500. Receive hits: " + count); bmfr.close(); } }
9,640
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/NoSuchMetaStoreException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file;

import java.io.IOException;

/**
 * Thrown when a named metadata store is requested from a file that does not contain one by that
 * name (e.g. a file without a serialized bloom filter section).
 */
public class NoSuchMetaStoreException extends IOException {

  private static final long serialVersionUID = 1L;

  public NoSuchMetaStoreException(String msg) {
    super(msg);
  }

  public NoSuchMetaStoreException(String msg, Throwable e) {
    super(msg, e);
  }
}
9,641
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/streams/RateLimitedOutputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.streams;

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.accumulo.core.util.ratelimit.NullRateLimiter;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
import org.apache.hadoop.fs.FSDataOutputStream;

/**
 * A decorator for {@code OutputStream} which limits the rate at which data may be written.
 * Underlying OutputStream is a FSDataOutputStream.
 */
public class RateLimitedOutputStream extends DataOutputStream {

  // limiter consulted before every write; never null
  private final RateLimiter writeLimiter;

  public RateLimitedOutputStream(FSDataOutputStream fsDataOutputStream, RateLimiter writeLimiter) {
    super(fsDataOutputStream);
    if (writeLimiter == null) {
      // a null limiter means "unlimited"; substitute the no-op implementation
      this.writeLimiter = NullRateLimiter.INSTANCE;
    } else {
      this.writeLimiter = writeLimiter;
    }
  }

  @Override
  public synchronized void write(int b) throws IOException {
    // acquire one permit per byte written
    writeLimiter.acquire(1);
    out.write(b);
  }

  @Override
  public synchronized void write(byte[] buffer, int offset, int length) throws IOException {
    // acquire permits proportional to the requested write size
    writeLimiter.acquire(length);
    out.write(buffer, offset, length);
  }

  @Override
  public void close() throws IOException {
    out.close();
  }

  /** Returns the current position in the underlying {@link FSDataOutputStream}. */
  public long position() {
    return ((FSDataOutputStream) out).getPos();
  }
}
9,642
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/streams/SeekableDataInputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.streams;

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.fs.Seekable;

/**
 * A wrapper converting a {@link Seekable} {@code InputStream} into a {@code Seekable}
 * {@link DataInputStream}
 */
public class SeekableDataInputStream extends DataInputStream implements Seekable {

  public <StreamType extends InputStream & Seekable> SeekableDataInputStream(StreamType stream) {
    super(stream);
  }

  // The constructor's type bound guarantees the wrapped stream implements Seekable,
  // so this cast is always safe.
  private Seekable seekable() {
    return (Seekable) in;
  }

  @Override
  public void seek(long pos) throws IOException {
    seekable().seek(pos);
  }

  @Override
  public long getPos() throws IOException {
    return seekable().getPos();
  }

  @Override
  public boolean seekToNewSource(long targetPos) throws IOException {
    return seekable().seekToNewSource(targetPos);
  }
}
9,643
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/streams/RateLimitedInputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.streams;

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.accumulo.core.util.ratelimit.NullRateLimiter;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
import org.apache.hadoop.fs.Seekable;

/**
 * A decorator for an {@code InputStream} which limits the rate at which reads are performed.
 */
public class RateLimitedInputStream extends FilterInputStream implements Seekable {

  // limiter charged after each successful read; never null
  private final RateLimiter rateLimiter;

  public <StreamType extends InputStream & Seekable> RateLimitedInputStream(StreamType stream,
      RateLimiter rateLimiter) {
    super(stream);
    if (rateLimiter == null) {
      // a null limiter means "unlimited"; substitute the no-op implementation
      this.rateLimiter = NullRateLimiter.INSTANCE;
    } else {
      this.rateLimiter = rateLimiter;
    }
  }

  @Override
  public int read() throws IOException {
    int b = in.read();
    // only charge the limiter when a byte was actually read (not at EOF)
    if (b != -1) {
      rateLimiter.acquire(1);
    }
    return b;
  }

  @Override
  public int read(byte[] buffer, int offset, int length) throws IOException {
    int n = in.read(buffer, offset, length);
    // only charge for bytes actually transferred
    if (n > 0) {
      rateLimiter.acquire(n);
    }
    return n;
  }

  @Override
  public void seek(long pos) throws IOException {
    ((Seekable) in).seek(pos);
  }

  @Override
  public long getPos() throws IOException {
    return ((Seekable) in).getPos();
  }

  @Override
  public boolean seekToNewSource(long targetPos) throws IOException {
    return ((Seekable) in).seekToNewSource(targetPos);
  }
}
9,644
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/streams/BoundedRangeFileInputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.streams;

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.fs.Seekable;

/**
 * BoundedRangeFIleInputStream abstracts a contiguous region of a Hadoop FSDataInputStream as a
 * regular input stream. One can create multiple BoundedRangeFileInputStream on top of the same
 * FSDataInputStream and they would not interfere with each other.
 */
public class BoundedRangeFileInputStream extends InputStream {

  private volatile boolean closed = false;
  private final InputStream in;
  private long pos; // next position to read from the underlying stream
  private long end; // first position past the readable region (exclusive)
  private long mark; // marked position, or -1 if no mark is set
  private final byte[] oneByte = new byte[1]; // scratch buffer for single-byte reads

  /**
   * Constructor
   *
   * @param in The FSDataInputStream we connect to.
   * @param offset Beginning offset of the region.
   * @param length Length of the region.
   *
   *        The actual length of the region may be smaller if (off_begin + length) goes beyond the
   *        end of FS input stream.
   */
  public <StreamType extends InputStream & Seekable> BoundedRangeFileInputStream(StreamType in,
      long offset, long length) {
    if (offset < 0 || length < 0) {
      throw new IndexOutOfBoundsException("Invalid offset/length: " + offset + "/" + length);
    }

    this.in = in;
    this.pos = offset;
    this.end = offset + length;
    this.mark = -1;
  }

  @Override
  public int available() {
    // The region length is a long; clamp rather than letting the narrowing cast truncate.
    // A bare (int) (end - pos) could return a wrong or even negative value for regions
    // larger than Integer.MAX_VALUE, violating the InputStream.available() contract.
    long remaining = end - pos;
    return remaining > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) remaining;
  }

  @Override
  public int read() throws IOException {
    int ret = read(oneByte);
    if (ret == 1) {
      return oneByte[0] & 0xff;
    }
    return -1;
  }

  @Override
  public int read(byte[] b) throws IOException {
    return read(b, 0, b.length);
  }

  @Override
  public int read(final byte[] b, final int off, int len) throws IOException {
    // standard InputStream bounds check: any negative term means invalid arguments
    if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
      throw new IndexOutOfBoundsException();
    }

    // never read past the end of the bounded region
    final int n = (int) Math.min(Integer.MAX_VALUE, Math.min(len, (end - pos)));
    if (n == 0) {
      return -1;
    }
    int ret = 0;
    synchronized (in) {
      // ensuring we are not closed which would be followed by someone else reusing the decompressor
      if (closed) {
        throw new IOException("Stream closed");
      }
      // reposition before every read: other streams may share the underlying FSDataInputStream
      ((Seekable) in).seek(pos);
      ret = in.read(b, off, n);
    }
    if (ret < 0) {
      // underlying stream hit EOF early; shrink the region so future calls agree
      end = pos;
      return -1;
    }
    pos += ret;
    return ret;
  }

  @Override
  /*
   * We may skip beyond the end of the file.
   */
  public long skip(long n) {
    long len = Math.min(n, end - pos);
    pos += len;
    return len;
  }

  @Override
  public synchronized void mark(int readlimit) {
    mark = pos;
  }

  @Override
  public synchronized void reset() throws IOException {
    if (mark < 0) {
      throw new IOException("Resetting to invalid mark");
    }
    pos = mark;
  }

  @Override
  public boolean markSupported() {
    return true;
  }

  @Override
  public void close() {
    // Synchronize on the FSDataInputStream to ensure we are blocked if in the read method:
    // Once this close completes, the underlying decompression stream may be returned to
    // the pool and subsequently used. Turns out this is a problem if currently using it to read.
    if (!closed) {
      synchronized (in) {
        // Invalidate the state of the stream.
        closed = true;
      }
    }
  }
}
9,645
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/keyfunctor/ColumnQualifierFunctor.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.keyfunctor; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.PartialKey; import org.apache.accumulo.core.data.Range; import org.apache.hadoop.util.bloom.Key; public class ColumnQualifierFunctor implements KeyFunctor { @Override public org.apache.hadoop.util.bloom.Key transform(org.apache.accumulo.core.data.Key acuKey) { byte[] keyData; ByteSequence row = acuKey.getRowData(); ByteSequence cf = acuKey.getColumnFamilyData(); ByteSequence cq = acuKey.getColumnQualifierData(); keyData = new byte[row.length() + cf.length() + cq.length()]; System.arraycopy(row.getBackingArray(), row.offset(), keyData, 0, row.length()); System.arraycopy(cf.getBackingArray(), cf.offset(), keyData, row.length(), cf.length()); System.arraycopy(cq.getBackingArray(), cq.offset(), keyData, row.length() + cf.length(), cq.length()); return new org.apache.hadoop.util.bloom.Key(keyData, 1.0); } @Override public Key transform(Range range) { if (RowFunctor.isRangeInBloomFilter(range, PartialKey.ROW_COLFAM_COLQUAL)) { return transform(range.getStartKey()); } return null; } }
9,646
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/keyfunctor/KeyFunctor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.keyfunctor;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;

/**
 * Maps Accumulo keys and ranges to bloom filter keys.
 */
public interface KeyFunctor {

  /**
   * Converts an Accumulo key into a bloom filter key.
   */
  org.apache.hadoop.util.bloom.Key transform(Key key);

  /**
   * Converts a range into a bloom filter key. Implementations should return null if a range can
   * not be converted to a bloom key.
   */
  org.apache.hadoop.util.bloom.Key transform(Range range);
}
9,647
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/keyfunctor/RowFunctor.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.keyfunctor; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.PartialKey; import org.apache.accumulo.core.data.Range; import org.apache.hadoop.util.bloom.Key; public class RowFunctor implements KeyFunctor { @Override public Key transform(org.apache.accumulo.core.data.Key acuKey) { byte[] keyData; ByteSequence row = acuKey.getRowData(); keyData = new byte[row.length()]; System.arraycopy(row.getBackingArray(), 0, keyData, 0, row.length()); return new Key(keyData, 1.0); } @Override public Key transform(Range range) { if (isRangeInBloomFilter(range, PartialKey.ROW)) { return transform(range.getStartKey()); } return null; } static boolean isRangeInBloomFilter(Range range, PartialKey keyDepth) { if (range.getStartKey() == null || range.getEndKey() == null) { return false; } if (range.getStartKey().equals(range.getEndKey(), keyDepth)) { return true; } // include everything but the deleted flag in the comparison... return range.getStartKey().followingKey(keyDepth).equals(range.getEndKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME) && !range.isEndKeyInclusive(); } }
9,648
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/keyfunctor/ColumnFamilyFunctor.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.keyfunctor; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.PartialKey; import org.apache.accumulo.core.data.Range; import org.apache.hadoop.util.bloom.Key; public class ColumnFamilyFunctor implements KeyFunctor { public static final PartialKey kDepth = PartialKey.ROW_COLFAM; @Override public Key transform(org.apache.accumulo.core.data.Key acuKey) { byte[] keyData; ByteSequence row = acuKey.getRowData(); ByteSequence cf = acuKey.getColumnFamilyData(); keyData = new byte[row.length() + cf.length()]; System.arraycopy(row.getBackingArray(), row.offset(), keyData, 0, row.length()); System.arraycopy(cf.getBackingArray(), cf.offset(), keyData, row.length(), cf.length()); return new Key(keyData, 1.0); } @Override public Key transform(Range range) { if (RowFunctor.isRangeInBloomFilter(range, kDepth)) { return transform(range.getStartKey()); } return null; } }
9,649
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.stream.Collectors.toCollection; import java.io.BufferedWriter; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.TreeSet; import org.apache.accumulo.core.cli.ConfigOpts; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.SiteConfiguration; import org.apache.accumulo.core.crypto.CryptoFactoryLoader; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.file.FileOperations; import org.apache.accumulo.core.file.FileSKVIterator; import org.apache.accumulo.core.iterators.SortedKeyValueIterator; import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator; import org.apache.accumulo.core.metadata.UnreferencedTabletFile; import 
org.apache.accumulo.core.spi.crypto.CryptoEnvironment; import org.apache.accumulo.core.spi.crypto.CryptoService; import org.apache.accumulo.core.util.TextUtil; import org.apache.accumulo.start.spi.KeywordExecutable; import org.apache.datasketches.quantiles.ItemsSketch; import org.apache.datasketches.quantilescommon.QuantileSearchCriteria; import org.apache.datasketches.quantilescommon.QuantilesUtil; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.BinaryComparable; import org.apache.hadoop.io.Text; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.beust.jcommander.Parameter; import com.google.auto.service.AutoService; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; @AutoService(KeywordExecutable.class) @SuppressFBWarnings(value = "PATH_TRAVERSAL_OUT", justification = "app is run in same security context as user providing the filename") public class GenerateSplits implements KeywordExecutable { private static final Logger log = LoggerFactory.getLogger(GenerateSplits.class); static class Opts extends ConfigOpts { @Parameter(names = {"-n", "--num"}, description = "The number of split points to generate. Can be used to create n+1 tablets. Cannot use with the split size option.") public int numSplits = 0; @Parameter(names = {"-ss", "--split-size"}, description = "The minimum split size in uncompressed bytes. Cannot use with num splits option.") public long splitSize = 0; @Parameter(names = {"-b64", "--base64encoded"}, description = "Base 64 encode the split points") public boolean base64encode = false; @Parameter(names = {"-sf", "--splits-file"}, description = "Output the splits to a file") public String outputFile; @Parameter(description = "<file|directory>[ <file|directory>...] 
-n <num> | -ss <split_size>") public List<String> files = new ArrayList<>(); } @Override public String keyword() { return "generate-splits"; } @Override public String description() { return "Generate split points from a set of 1 or more rfiles"; } public static void main(String[] args) throws Exception { new GenerateSplits().execute(args); } @Override public void execute(String[] args) throws Exception { Opts opts = new Opts(); opts.parseArgs(GenerateSplits.class.getName(), args); if (opts.files.isEmpty()) { throw new IllegalArgumentException("No files were given"); } Configuration hadoopConf = new Configuration(); SiteConfiguration siteConf = opts.getSiteConfiguration(); CryptoService cryptoService = CryptoFactoryLoader .getServiceForClient(CryptoEnvironment.Scope.TABLE, siteConf.getAllCryptoProperties()); boolean encode = opts.base64encode; TreeSet<String> splits; if (opts.numSplits > 0 && opts.splitSize > 0) { throw new IllegalArgumentException("Requested number of splits and split size."); } if (opts.numSplits == 0 && opts.splitSize == 0) { throw new IllegalArgumentException("Required number of splits or split size."); } int requestedNumSplits = opts.numSplits; long splitSize = opts.splitSize; FileSystem fs = FileSystem.get(hadoopConf); List<UnreferencedTabletFile> files = new ArrayList<>(); for (String file : opts.files) { Path path = new Path(file); fs = PrintInfo.resolveFS(log, hadoopConf, path); // get all the files in the directory files.addAll(getFiles(fs, path)); } if (files.isEmpty()) { throw new IllegalArgumentException("No files were found in " + opts.files); } else { log.trace("Found the following files: {}", files); } // if no size specified look at indexed keys first if (opts.splitSize == 0) { splits = getIndexKeys(siteConf, hadoopConf, fs, files, requestedNumSplits, encode, cryptoService); // if there weren't enough splits indexed, try again with size = 0 if (splits.size() < requestedNumSplits) { log.info("Only found {} indexed keys but need {}. 
Doing a full scan on files {}", splits.size(), requestedNumSplits, files);
        splits = getSplitsFromFullScan(siteConf, hadoopConf, files, fs, requestedNumSplits, encode,
            cryptoService);
      }
    } else {
      splits = getSplitsBySize(siteConf, hadoopConf, files, fs, splitSize, encode, cryptoService);
    }

    TreeSet<String> desiredSplits;
    int numFound = splits.size();
    // its possible we found too many indexed so get requested number but evenly spaced
    if (opts.splitSize == 0 && numFound > requestedNumSplits) {
      desiredSplits = getEvenlySpacedSplits(numFound, requestedNumSplits, splits.iterator());
    } else {
      if (numFound < requestedNumSplits) {
        log.warn("Only found {} splits", numFound);
      }
      desiredSplits = splits;
    }
    log.info("Generated {} splits", desiredSplits.size());
    // either write the chosen splits to the requested output file, or dump them to stdout
    if (opts.outputFile != null) {
      log.info("Writing splits to file {} ", opts.outputFile);
      try (var writer = new PrintWriter(new BufferedWriter(
          new OutputStreamWriter(new FileOutputStream(opts.outputFile), UTF_8)))) {
        desiredSplits.forEach(writer::println);
      }
    } else {
      desiredSplits.forEach(System.out::println);
    }
  }

  /**
   * Recursively collect all RFiles under the given path. A directory is walked; a plain file must
   * end in ".rf" or an IllegalArgumentException is thrown.
   */
  private List<UnreferencedTabletFile> getFiles(FileSystem fs, Path path) throws IOException {
    List<UnreferencedTabletFile> files = new ArrayList<>();
    if (fs.getFileStatus(path).isDirectory()) {
      var iter = fs.listFiles(path, true);
      while (iter.hasNext()) {
        files.addAll(getFiles(fs, iter.next().getPath()));
      }
    } else {
      if (!path.toString().endsWith(".rf")) {
        throw new IllegalArgumentException("Provided file (" + path + ") does not end with '.rf'");
      }
      files.add(UnreferencedTabletFile.of(fs, path));
    }
    return files;
  }

  /**
   * Feed every row seen by the iterator into a DataSketches quantiles sketch and return
   * {@code numSplits} approximately evenly-weighted split rows.
   */
  private Text[] getQuantiles(SortedKeyValueIterator<Key,Value> iterator, int numSplits)
      throws IOException {
    var itemsSketch = ItemsSketch.getInstance(Text.class, BinaryComparable::compareTo);
    while (iterator.hasTop()) {
      Text row = iterator.getTopKey().getRow();
      itemsSketch.update(row);
      iterator.next();
    }
    // the number requested represents the number of regions between the resulting array elements
    // the actual number of array elements is one more than that to account for endpoints;
    // so, we ask for one more because we want the number of median elements in the array to
    // represent the number of split points and we will drop the first and last array element
    double[] ranks = QuantilesUtil.equallyWeightedRanks(numSplits + 1);
    // the choice to use INCLUSIVE or EXCLUSIVE is arbitrary here; EXCLUSIVE matches the behavior
    // of datasketches 3.x, so we might as well preserve that for 4.x
    Text[] items = itemsSketch.getQuantiles(ranks, QuantileSearchCriteria.EXCLUSIVE);
    // drop the min and max, so we only keep the median elements to use as split points
    return Arrays.copyOfRange(items, 1, items.length - 1);
  }

  /**
   * Return the requested number of splits, evenly spaced across splits found. Visible for testing
   */
  static TreeSet<String> getEvenlySpacedSplits(int numFound, long requestedNumSplits,
      Iterator<String> splitsIter) {
    TreeSet<String> desiredSplits = new TreeSet<>();
    // This is how much each of the found rows will advance towards a desired split point. Add
    // one to numSplits because if we request 9 splits, there will 10 tablets and we want the 9
    // splits evenly spaced between the 10 tablets.
    double increment = (requestedNumSplits + 1.0) / numFound;
    log.debug("Found {} splits but requested {} so picking incrementally by {}", numFound,
        requestedNumSplits, increment);

    // Tracks how far along we are towards the next split.
    double progressToNextSplit = 0;

    for (int i = 0; i < numFound; i++) {
      progressToNextSplit += increment;
      String next = splitsIter.next();
      if (progressToNextSplit > 1 && desiredSplits.size() < requestedNumSplits) {
        desiredSplits.add(next);
        progressToNextSplit -= 1; // decrease by 1 to preserve any partial progress
      }
    }

    return desiredSplits;
  }

  /**
   * Render a row as a printable split string: Base64 when {@code encode} is true, otherwise the
   * raw bytes with backslashes escaped and non-printable (outside 32..126) bytes dropped.
   * Returns null for a null row.
   */
  private static String encode(boolean encode, Text text) {
    if (text == null) {
      return null;
    }
    byte[] bytes = TextUtil.getBytes(text);
    if (encode) {
      return Base64.getEncoder().encodeToString(bytes);
    } else {
      // drop non printable characters
      StringBuilder sb = new StringBuilder();
      for (byte aByte : bytes) {
        int c = 0xff & aByte;
        if (c == '\\') {
          sb.append("\\\\");
        } else if (c >= 32 && c <= 126) {
          sb.append((char) c);
        } else {
          log.debug("Dropping non printable char: \\x{}", Integer.toHexString(c));
        }
      }
      return sb.toString();
    }
  }

  /**
   * Scan the files for indexed keys first since it is more efficient than a full file scan.
   * Merges the index entries of all files and derives split rows from quantiles over them.
   * All index readers are closed in the finally block even if quantile computation fails.
   */
  private TreeSet<String> getIndexKeys(AccumuloConfiguration accumuloConf, Configuration hadoopConf,
      FileSystem fs, List<UnreferencedTabletFile> files, int requestedNumSplits,
      boolean base64encode, CryptoService cs) throws IOException {
    Text[] splitArray;
    List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<>(files.size());
    List<FileSKVIterator> fileReaders = new ArrayList<>(files.size());
    try {
      for (UnreferencedTabletFile file : files) {
        FileSKVIterator reader = FileOperations.getInstance().newIndexReaderBuilder()
            .forFile(file, fs, hadoopConf, cs).withTableConfiguration(accumuloConf).build();
        readers.add(reader);
        fileReaders.add(reader);
      }
      var iterator = new MultiIterator(readers, true);
      splitArray = getQuantiles(iterator, requestedNumSplits);
    } finally {
      for (var r : fileReaders) {
        r.close();
      }
    }

    log.debug("Got {} splits from indices of {}", splitArray.length, files);
    return Arrays.stream(splitArray).map(t -> encode(base64encode, t))
        .collect(toCollection(TreeSet::new));
  }

  /**
   * Derive split rows from quantiles over a full merged scan of every key in the files.
   * NOTE(review): takes SiteConfiguration where the sibling methods take AccumuloConfiguration —
   * presumably interchangeable at the call site, but the inconsistency looks accidental; confirm.
   */
  private TreeSet<String> getSplitsFromFullScan(SiteConfiguration accumuloConf,
      Configuration hadoopConf, List<UnreferencedTabletFile> files, FileSystem fs, int numSplits,
      boolean base64encode, CryptoService cs) throws IOException {
    Text[] splitArray;
    List<FileSKVIterator> fileReaders = new ArrayList<>(files.size());
    List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<>(files.size());
    SortedKeyValueIterator<Key,Value> iterator;

    try {
      for (UnreferencedTabletFile file : files) {
        FileSKVIterator reader = FileOperations.getInstance().newScanReaderBuilder()
            .forFile(file, fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
            .overRange(new Range(), Set.of(), false).build();
        readers.add(reader);
        fileReaders.add(reader);
      }
      iterator = new MultiIterator(readers, false);
      iterator.seek(new Range(), Collections.emptySet(), false);
      splitArray = getQuantiles(iterator, numSplits);
    } finally {
      for (var r : fileReaders) {
        r.close();
      }
    }

    log.debug("Got {} splits from quantiles across {} files", splitArray.length, files.size());
    return Arrays.stream(splitArray).map(t -> encode(base64encode, t))
        .collect(toCollection(TreeSet::new));
  }

  /**
   * Get number of splits based on requested size of split. Emits a split row each time the
   * accumulated serialized size of keys+values since the last split exceeds {@code splitSize}.
   */
  private TreeSet<String> getSplitsBySize(AccumuloConfiguration accumuloConf,
      Configuration hadoopConf, List<UnreferencedTabletFile> files, FileSystem fs, long splitSize,
      boolean base64encode, CryptoService cs) throws IOException {
    long currentSplitSize = 0;
    long totalSize = 0;
    TreeSet<String> splits = new TreeSet<>();
    List<FileSKVIterator> fileReaders = new ArrayList<>(files.size());
    List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<>(files.size());
    SortedKeyValueIterator<Key,Value> iterator;
    try {
      for (UnreferencedTabletFile file : files) {
        FileSKVIterator reader = FileOperations.getInstance().newScanReaderBuilder()
            .forFile(file, fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
            .overRange(new Range(), Set.of(), false).build();
        readers.add(reader);
        fileReaders.add(reader);
      }
      iterator = new MultiIterator(readers, false);
      iterator.seek(new Range(), Collections.emptySet(), false);
      while (iterator.hasTop()) {
        Key key = iterator.getTopKey();
        Value val = iterator.getTopValue();
        int size = key.getSize() + val.getSize();
        currentSplitSize += size;
        totalSize += size;
        if (currentSplitSize > splitSize) {
          splits.add(encode(base64encode, key.getRow()));
          currentSplitSize = 0;
        }
        iterator.next();
      }
    } finally {
      for (var r : fileReaders) {
        r.close();
      }
    }

    log.debug("Got {} splits with split size {} out of {} total bytes read across {} files",
        splits.size(), splitSize, totalSize, files.size());
    return splits;
  }
}
9,650
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/MetricsGatherer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Map;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

/**
 * Interface used to gather metrics from RFiles.
 *
 * @param <T> Type used to return metrics in getMetrics(). This does not impact collection of
 *        metrics at all, is only used in that method.
 */
public interface MetricsGatherer<T> {

  /**
   * Initialize the gatherer when it is registered with the RFile Reader.
   *
   * @param cf Map of the LocalityGroup names to their column families
   */
  void init(Map<String,ArrayList<ByteSequence>> cf);

  /**
   * Start a new LocalityGroup. This method is used when the RFile seeks to the next LocalityGroup.
   *
   * @param cf Text object of the column family of the first entry in the locality group
   */
  void startLocalityGroup(Text cf);

  /**
   * Collect and store metrics for the given entry.
   *
   * @param key Key object of the entry you are collecting metrics from
   * @param val Value object of the entry you are collecting metrics from
   */
  void addMetric(Key key, Value val);

  /**
   * Start a new block within a LocalityGroup. This method is used when the RFile moves on to the
   * next block in the LocalityGroup.
   */
  void startBlock();

  /**
   * Print the results of the metrics gathering by locality group in the format: Metric name,
   * Number of keys, Percentage of keys, Number of blocks, Percentage of blocks.
   *
   * @param hash Boolean to determine whether the values being printed should be hashed
   * @param metricWord String of the name of the metric that was collected
   * @param out PrintStream of where the information should be written to
   */
  void printMetrics(boolean hash, String metricWord, PrintStream out);

  /**
   * @return the metrics gathered
   */
  T getMetrics();
}
9,651
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import static java.util.Objects.requireNonNull;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UncheckedIOException;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.RandomAccess;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
import org.apache.accumulo.core.file.blockfile.impl.SeekableByteArrayInputStream;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.accumulo.core.file.rfile.bcfile.Utils;
import org.apache.hadoop.io.WritableComparable;

/**
 * Writer and reader for the multi-level (tree-shaped) key index of an RFile. Index blocks are
 * kept in serialized form and deserialized lazily; the reader walks the tree one block at a time
 * so a large index never has to be fully materialized in memory.
 */
public class MultiLevelIndex {

  /**
   * One entry of an index block: a key plus the location (offset/sizes) and entry count of the
   * block it points to. Serialization layout depends on {@code newFormat} (old formats omit the
   * offset and size fields).
   */
  public static class IndexEntry implements WritableComparable<IndexEntry> {
    private Key key;
    private int entries;
    private long offset;
    private long compressedSize;
    private long rawSize;
    private boolean newFormat;

    IndexEntry(Key k, int e, long offset, long compressedSize, long rawSize) {
      this.key = k;
      this.entries = e;
      this.offset = offset;
      this.compressedSize = compressedSize;
      this.rawSize = rawSize;
      newFormat = true;
    }

    public IndexEntry(boolean newFormat) {
      this.newFormat = newFormat;
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      key = new Key();
      key.readFields(in);
      entries = in.readInt();
      if (newFormat) {
        offset = Utils.readVLong(in);
        compressedSize = Utils.readVLong(in);
        rawSize = Utils.readVLong(in);
      } else {
        // old index format carries no location information
        offset = -1;
        compressedSize = -1;
        rawSize = -1;
      }
    }

    @Override
    public void write(DataOutput out) throws IOException {
      key.write(out);
      out.writeInt(entries);
      if (newFormat) {
        Utils.writeVLong(out, offset);
        Utils.writeVLong(out, compressedSize);
        Utils.writeVLong(out, rawSize);
      }
    }

    public Key getKey() {
      return key;
    }

    public int getNumEntries() {
      return entries;
    }

    public long getOffset() {
      return offset;
    }

    public long getCompressedSize() {
      return compressedSize;
    }

    public long getRawSize() {
      return rawSize;
    }

    @Override
    public int compareTo(IndexEntry o) {
      return key.compareTo(o.key);
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof IndexEntry) {
        return compareTo((IndexEntry) o) == 0;
      }
      return false;
    }

    // NOTE(review): deliberate stub — instances are never meant to be used in hash-based
    // collections; the assert fires under -ea if that assumption is ever violated.
    @Override
    public int hashCode() {
      assert false : "hashCode not designed";
      return 42; // any arbitrary constant will do
    }
  }

  /**
   * Random-access list view over serialized index data; elements are deserialized on every
   * {@link #get(int)}. Two layouts are supported: an explicit offsets array, or offsets embedded
   * in the byte array itself (offsets == null), in which case offsets are read through
   * {@code sbais}/{@code dis} on demand.
   */
  private abstract static class SerializedIndexBase<T> extends AbstractList<T>
      implements RandomAccess {
    protected int[] offsets;
    protected byte[] data;

    protected SeekableByteArrayInputStream sbais;
    protected DataInputStream dis;
    protected int offsetsOffset;
    protected int indexOffset;
    protected int numOffsets;
    protected int indexSize;

    SerializedIndexBase(int[] offsets, byte[] data) {
      requireNonNull(offsets, "offsets argument was null");
      requireNonNull(data, "data argument was null");
      this.offsets = offsets;
      this.data = data;
      sbais = new SeekableByteArrayInputStream(data);
      dis = new DataInputStream(sbais);
    }

    SerializedIndexBase(byte[] data, int offsetsOffset, int numOffsets, int indexOffset,
        int indexSize) {
      requireNonNull(data, "data argument was null");
      // limit the stream so reads cannot run past the index region of the shared buffer
      sbais = new SeekableByteArrayInputStream(data, indexOffset + indexSize);
      dis = new DataInputStream(sbais);
      this.offsetsOffset = offsetsOffset;
      this.indexOffset = indexOffset;
      this.numOffsets = numOffsets;
      this.indexSize = indexSize;
    }

    /**
     * Before this method is called, {@code this.dis} is seeked to the offset of a serialized index
     * entry. This method should deserialize the index entry by reading from {@code this.dis} and
     * return it.
     */
    protected abstract T newValue() throws IOException;

    @Override
    public T get(int index) {
      try {
        int offset;
        if (offsets == null) {
          // offsets live inside the serialized buffer; read the int at the requested slot
          if (index < 0 || index >= numOffsets) {
            throw new IndexOutOfBoundsException("index:" + index + " numOffsets:" + numOffsets);
          }
          sbais.seek(offsetsOffset + index * 4);
          offset = dis.readInt();
        } else {
          offset = offsets[index];
        }
        sbais.seek(indexOffset + offset);
        return newValue();
      } catch (IOException ioe) {
        throw new UncheckedIOException(ioe);
      }
    }

    @Override
    public int size() {
      if (offsets == null) {
        return numOffsets;
      } else {
        return offsets.length;
      }
    }
  }

  // a list that deserializes index entries on demand
  private static class SerializedIndex extends SerializedIndexBase<IndexEntry> {

    private boolean newFormat;

    SerializedIndex(int[] offsets, byte[] data, boolean newFormat) {
      super(offsets, data);
      this.newFormat = newFormat;
    }

    SerializedIndex(byte[] data, int offsetsOffset, int numOffsets, int indexOffset,
        int indexSize) {
      super(data, offsetsOffset, numOffsets, indexOffset, indexSize);
      this.newFormat = true;
    }

    // approximate in-memory footprint: entry bytes plus 4 bytes per offset
    public long sizeInBytes() {
      if (offsets == null) {
        return indexSize + 4L * numOffsets;
      } else {
        return data.length + 4L * offsets.length;
      }
    }

    @Override
    protected IndexEntry newValue() throws IOException {
      IndexEntry ie = new IndexEntry(newFormat);
      ie.readFields(dis);
      return ie;
    }
  }

  // a list view that deserializes only the Key portion of each entry (used for binary search)
  private static class KeyIndex extends SerializedIndexBase<Key> {

    KeyIndex(int[] offsets, byte[] data) {
      super(offsets, data);
    }

    KeyIndex(byte[] data, int offsetsOffset, int numOffsets, int indexOffset, int indexSize) {
      super(data, offsetsOffset, numOffsets, indexOffset, indexSize);
    }

    @Override
    protected Key newValue() throws IOException {
      Key key = new Key();
      key.readFields(dis);
      return key;
    }
  }

  /**
   * One node of the index tree in serialized form. Supports writing (via indexOut/offsets) and
   * reading any of the historical on-disk versions (3, 4, 6, 7, 8). When read from a cached block
   * the node aliases the cache's buffer instead of copying it.
   */
  static class IndexBlock {

    private ByteArrayOutputStream indexBytes;
    private DataOutputStream indexOut;

    private ArrayList<Integer> offsets;
    private int level;
    private int offset;
    private boolean hasNext;

    private byte[] data;
    private int[] offsetsArray;
    private int numOffsets;
    private int offsetsOffset;
    private int indexSize;
    private int indexOffset;

    private boolean newFormat;

    public IndexBlock(int level, int totalAdded) {
      this.level = level;
      this.offset = totalAdded;

      indexBytes = new ByteArrayOutputStream();
      indexOut = new DataOutputStream(indexBytes);
      offsets = new ArrayList<>();
    }

    public IndexBlock() {}

    public void add(Key key, int value, long offset, long compressedSize, long rawSize)
        throws IOException {
      offsets.add(indexOut.size());
      new IndexEntry(key, value, offset, compressedSize, rawSize).write(indexOut);
    }

    // serialized size of this block: entry bytes plus 4 bytes per offset
    int getSize() {
      return indexOut.size() + 4 * offsets.size();
    }

    public void write(DataOutput out) throws IOException {
      out.writeInt(level);
      out.writeInt(offset);
      out.writeBoolean(hasNext);

      out.writeInt(offsets.size());
      for (Integer offset : offsets) {
        out.writeInt(offset);
      }

      indexOut.close();
      byte[] indexData = indexBytes.toByteArray();

      out.writeInt(indexData.length);
      out.write(indexData);
    }

    public void readFields(DataInput in, int version) throws IOException {

      if (version == RFile.RINDEX_VER_6 || version == RFile.RINDEX_VER_7
          || version == RFile.RINDEX_VER_8) {
        level = in.readInt();
        offset = in.readInt();
        hasNext = in.readBoolean();

        CachableBlockFile.CachedBlockRead abr = (CachableBlockFile.CachedBlockRead) in;
        if (abr.isIndexable()) {
          // this block is cached, so avoid copy
          data = abr.getBuffer();
          // use offset data in serialized form and avoid copy
          numOffsets = abr.readInt();
          offsetsOffset = abr.getPosition();
          int skipped = abr.skipBytes(numOffsets * 4);
          if (skipped != numOffsets * 4) {
            throw new IOException("Skipped less than expected " + skipped + " " + (numOffsets * 4));
          }
          indexSize = in.readInt();
          indexOffset = abr.getPosition();
          skipped = abr.skipBytes(indexSize);
          if (skipped != indexSize) {
            throw new IOException("Skipped less than expected " + skipped + " " + indexSize);
          }
        } else {
          // block is not cached; copy offsets and entry bytes into private arrays
          numOffsets = in.readInt();
          offsetsArray = new int[numOffsets];

          for (int i = 0; i < numOffsets; i++) {
            offsetsArray[i] = in.readInt();
          }

          indexSize = in.readInt();
          data = new byte[indexSize];
          in.readFully(data);
          newFormat = true;
        }
      } else if (version == RFile.RINDEX_VER_3) {
        // version 3: a flat list of old-format entries; re-serialize each one to build the
        // offsets array expected by the lazy list views
        level = 0;
        offset = 0;
        hasNext = false;

        int size = in.readInt();

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);
        ArrayList<Integer> oal = new ArrayList<>();

        for (int i = 0; i < size; i++) {
          IndexEntry ie = new IndexEntry(false);
          oal.add(dos.size());
          ie.readFields(in);
          ie.write(dos);
        }

        dos.close();

        int[] oia = new int[oal.size()];
        for (int i = 0; i < oal.size(); i++) {
          oia[i] = oal.get(i);
        }

        data = baos.toByteArray();
        offsetsArray = oia;
        newFormat = false;
      } else if (version == RFile.RINDEX_VER_4) {
        // version 4: offsets array followed by the serialized entry bytes
        level = 0;
        offset = 0;
        hasNext = false;

        int numIndexEntries = in.readInt();
        int[] offsets = new int[numIndexEntries];
        for (int i = 0; i < numIndexEntries; i++) {
          offsets[i] = in.readInt();
        }

        int size = in.readInt();
        byte[] indexData = new byte[size];
        in.readFully(indexData);

        data = indexData;
        offsetsArray = offsets;
        newFormat = false;
      } else {
        throw new IllegalStateException("Unexpected version " + version);
      }

    }

    SerializedIndex getIndex() {
      // create SerializedIndex on demand as each has an internal input stream over byte array...
      // keeping a SerializedIndex ref for the object could lead to
      // problems with deep copies.
      if (offsetsArray == null) {
        return new SerializedIndex(data, offsetsOffset, numOffsets, indexOffset, indexSize);
      } else {
        return new SerializedIndex(offsetsArray, data, newFormat);
      }
    }

    public List<Key> getKeyIndex() {
      // create KeyIndex on demand as each has an internal input stream over byte array... keeping a
      // KeyIndex ref for the object could lead to problems with
      // deep copies.
      if (offsetsArray == null) {
        return new KeyIndex(data, offsetsOffset, numOffsets, indexOffset, indexSize);
      } else {
        return new KeyIndex(offsetsArray, data);
      }
    }

    int getLevel() {
      return level;
    }

    int getOffset() {
      return offset;
    }

    boolean hasNext() {
      return hasNext;
    }

    void setHasNext(boolean b) {
      this.hasNext = b;
    }
  }

  /**
   * this class buffers writes to the index so that chunks of index blocks are contiguous in the
   * file instead of having index blocks sprinkled throughout the file making scans of the entire
   * index slow.
   */
  public static class BufferedWriter {

    private Writer writer;
    private DataOutputStream buffer;
    private int buffered;
    private ByteArrayOutputStream baos;

    public BufferedWriter(Writer writer) {
      this.writer = writer;
      baos = new ByteArrayOutputStream(1 << 20);
      buffer = new DataOutputStream(baos);
      buffered = 0;
    }

    // replay the buffered entries into the real writer, then reset the buffer
    private void flush() throws IOException {
      buffer.close();

      DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));

      IndexEntry ie = new IndexEntry(true);
      for (int i = 0; i < buffered; i++) {
        ie.readFields(dis);
        writer.add(ie.getKey(), ie.getNumEntries(), ie.getOffset(), ie.getCompressedSize(),
            ie.getRawSize());
      }

      buffered = 0;
      baos = new ByteArrayOutputStream(1 << 20);
      buffer = new DataOutputStream(baos);
    }

    public void add(Key key, int data, long offset, long compressedSize, long rawSize)
        throws IOException {
      // (10 * 1 << 20) evaluates left-to-right as 10 << 20, i.e. a 10 MiB buffer threshold
      if (buffer.size() > (10 * 1 << 20)) {
        flush();
      }

      new IndexEntry(key, data, offset, compressedSize, rawSize).write(buffer);
      buffered++;
    }

    public void addLast(Key key, int data, long offset, long compressedSize, long rawSize)
        throws IOException {
      flush();
      writer.addLast(key, data, offset, compressedSize, rawSize);
    }

    public void close(DataOutput out) throws IOException {
      writer.close(out);
    }
  }

  /**
   * Builds the multi-level index while data is written. Each level buffers entries in an
   * IndexBlock; when a block exceeds the size threshold it is written to the file and an entry
   * pointing at it is pushed to the level above, growing the tree as needed.
   */
  public static class Writer {
    private int threshold;

    private ArrayList<IndexBlock> levels;

    private int totalAdded;

    private boolean addedLast = false;

    private BCFile.Writer blockFileWriter;

    Writer(BCFile.Writer blockFileWriter, int maxBlockSize) {
      this.blockFileWriter = blockFileWriter;
      this.threshold = maxBlockSize;
      levels = new ArrayList<>();
    }

    private void add(int level, Key key, int data, long offset, long compressedSize, long rawSize)
        throws IOException {
      if (level == levels.size()) {
        levels.add(new IndexBlock(level, 0));
      }

      IndexBlock iblock = levels.get(level);

      iblock.add(key, data, offset, compressedSize, rawSize);
    }

    private void flush(int level, Key lastKey, boolean last) throws IOException {

      // the top level block becomes the root and is written by close(), not here
      if (last && level == levels.size() - 1) {
        return;
      }

      IndexBlock iblock = levels.get(level);
      if ((iblock.getSize() > threshold && iblock.offsets.size() > 1) || last) {
        BCFile.Writer.BlockAppender out = blockFileWriter.prepareDataBlock();
        iblock.setHasNext(!last);
        iblock.write(out);
        out.close();

        add(level + 1, lastKey, 0, out.getStartPos(), out.getCompressedSize(), out.getRawSize());

        flush(level + 1, lastKey, last);

        if (last) {
          levels.set(level, null);
        } else {
          levels.set(level, new IndexBlock(level, totalAdded));
        }
      }
    }

    public void add(Key key, int data, long offset, long compressedSize, long rawSize)
        throws IOException {
      totalAdded++;
      add(0, key, data, offset, compressedSize, rawSize);
      flush(0, key, false);
    }

    public void addLast(Key key, int data, long offset, long compressedSize, long rawSize)
        throws IOException {
      if (addedLast) {
        throw new IllegalStateException("already added last");
      }

      totalAdded++;
      add(0, key, data, offset, compressedSize, rawSize);
      flush(0, key, true);
      addedLast = true;
    }

    public void close(DataOutput out) throws IOException {
      if (totalAdded > 0 && !addedLast) {
        throw new IllegalStateException("did not call addLast");
      }

      out.writeInt(totalAdded);

      // save root node
      if (levels.isEmpty()) {
        new IndexBlock(0, 0).write(out);
      } else {
        levels.get(levels.size() - 1).write(out);
      }
    }
  }

  /**
   * Reads the multi-level index. Lookups descend from the root block, loading child blocks from
   * the block store on demand; iteration walks sibling leaf blocks via the parent chain.
   */
  public static class Reader {
    private IndexBlock rootBlock;
    private CachableBlockFile.Reader blockStore;
    private int version;
    private int size;

    /**
     * A position within one index block plus a link to the parent node, forming a path from the
     * root that the iterator uses to move between sibling blocks.
     */
    public class Node {

      private Node parent;
      private IndexBlock indexBlock;
      private int currentPos;

      Node(Node parent, IndexBlock iBlock) {
        this.parent = parent;
        this.indexBlock = iBlock;
      }

      Node(IndexBlock rootInfo) {
        this.parent = null;
        this.indexBlock = rootInfo;
      }

      // descend to the leaf-level node positioned at the first entry >= key
      private Node lookup(Key key) throws IOException {
        int pos =
            Collections.binarySearch(indexBlock.getKeyIndex(), key, Comparator.naturalOrder());

        if (pos < 0) {
          pos = (pos * -1) - 1;
        }

        if (pos == indexBlock.getIndex().size()) {
          // key is past every entry; only legal at the root (i.e. past the end of the index)
          if (parent != null) {
            throw new IllegalStateException();
          }
          this.currentPos = pos;
          return this;
        }

        this.currentPos = pos;

        if (indexBlock.getLevel() == 0) {
          return this;
        }

        IndexEntry ie = indexBlock.getIndex().get(pos);
        Node child = new Node(this, getIndexBlock(ie));
        return child.lookup(key);
      }

      private Node getLast() throws IOException {
        currentPos = indexBlock.getIndex().size() - 1;
        if (indexBlock.getLevel() == 0) {
          return this;
        }

        IndexEntry ie = indexBlock.getIndex().get(currentPos);
        Node child = new Node(this, getIndexBlock(ie));
        return child.getLast();
      }

      private Node getFirst() throws IOException {
        currentPos = 0;
        if (indexBlock.getLevel() == 0) {
          return this;
        }

        IndexEntry ie = indexBlock.getIndex().get(currentPos);
        Node child = new Node(this, getIndexBlock(ie));
        return child.getFirst();
      }

      private Node getPrevious() throws IOException {
        if (currentPos == 0) {
          return parent.getPrevious();
        }

        currentPos--;

        IndexEntry ie = indexBlock.getIndex().get(currentPos);
        Node child = new Node(this, getIndexBlock(ie));
        return child.getLast();
      }

      private Node getNext() throws IOException {
        if (currentPos == indexBlock.getIndex().size() - 1) {
          return parent.getNext();
        }

        currentPos++;

        IndexEntry ie = indexBlock.getIndex().get(currentPos);
        Node child = new Node(this, getIndexBlock(ie));
        return child.getFirst();
      }

      Node getNextNode() throws IOException {
        return parent.getNext();
      }

      Node getPreviousNode() throws IOException {
        return parent.getPrevious();
      }
    }

    /**
     * ListIterator over all index entries, transparently crossing leaf-block boundaries. Mutating
     * operations (remove/set/add) are unsupported.
     */
    public static class IndexIterator implements ListIterator<IndexEntry> {

      private Node node;
      private ListIterator<IndexEntry> liter;

      private Node getPrevNode() {
        try {
          return node.getPreviousNode();
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      }

      private Node getNextNode() {
        try {
          return node.getNextNode();
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      }

      public IndexIterator() {
        node = null;
      }

      public IndexIterator(Node node) {
        this.node = node;
        liter = node.indexBlock.getIndex().listIterator(node.currentPos);
      }

      @Override
      public boolean hasNext() {
        if (node == null) {
          return false;
        }

        if (liter.hasNext()) {
          return true;
        } else {
          return node.indexBlock.hasNext();
        }
      }

      public IndexEntry peekPrevious() {
        IndexEntry ret = previous();
        next();
        return ret;
      }

      public IndexEntry peek() {
        IndexEntry ret = next();
        previous();
        return ret;
      }

      @Override
      public IndexEntry next() {
        if (!liter.hasNext()) {
          // exhausted the current leaf block; move to the next sibling
          node = getNextNode();
          liter = node.indexBlock.getIndex().listIterator();
        }

        return liter.next();
      }

      @Override
      public boolean hasPrevious() {
        if (node == null) {
          return false;
        }

        if (liter.hasPrevious()) {
          return true;
        } else {
          return node.indexBlock.getOffset() > 0;
        }
      }

      @Override
      public IndexEntry previous() {
        if (!liter.hasPrevious()) {
          node = getPrevNode();
          liter = node.indexBlock.getIndex().listIterator(node.indexBlock.getIndex().size());
        }

        return liter.previous();
      }

      @Override
      public int nextIndex() {
        return node.indexBlock.getOffset() + liter.nextIndex();
      }

      @Override
      public int previousIndex() {
        return node.indexBlock.getOffset() + liter.previousIndex();
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }

      @Override
      public void set(IndexEntry e) {
        throw new UnsupportedOperationException();
      }

      @Override
      public void add(IndexEntry e) {
        throw new UnsupportedOperationException();
      }
    }

    public Reader(CachableBlockFile.Reader blockStore, int version) {
      this.version = version;
      this.blockStore = blockStore;
    }

    // fetch and deserialize a child index block from the block store
    private IndexBlock getIndexBlock(IndexEntry ie) throws IOException {
      IndexBlock iblock = new IndexBlock();
      CachableBlockFile.CachedBlockRead in =
          blockStore.getMetaBlock(ie.getOffset(), ie.getCompressedSize(), ie.getRawSize());
      iblock.readFields(in, version);
      in.close();

      return iblock;
    }

    public IndexIterator lookup(Key key) throws IOException {
      Node node = new Node(rootBlock);
      return new IndexIterator(node.lookup(key));
    }

    public void readFields(DataInput in) throws IOException {

      size = 0;

      if (version == RFile.RINDEX_VER_6 || version == RFile.RINDEX_VER_7
          || version == RFile.RINDEX_VER_8) {
        size = in.readInt();
      }

      rootBlock = new IndexBlock();
      rootBlock.readFields(in, version);

      if (version == RFile.RINDEX_VER_3 || version == RFile.RINDEX_VER_4) {
        // old formats did not store a total count; the root IS the whole (single-level) index
        size = rootBlock.getIndex().size();
      }
    }

    public int size() {
      return size;
    }

    // accumulate serialized size and block count per tree level, recursing through children
    private void getIndexInfo(IndexBlock ib, Map<Integer,Long> sizesByLevel,
        Map<Integer,Long> countsByLevel) throws IOException {
      Long size = sizesByLevel.get(ib.getLevel());
      if (size == null) {
        size = 0L;
      }

      Long count = countsByLevel.get(ib.getLevel());
      if (count == null) {
        count = 0L;
      }

      SerializedIndex index = ib.getIndex();
      size += index.sizeInBytes();
      count++;

      sizesByLevel.put(ib.getLevel(), size);
      countsByLevel.put(ib.getLevel(), count);

      if (ib.getLevel() > 0) {
        for (IndexEntry ie : index) {
          IndexBlock cib = getIndexBlock(ie);
          getIndexInfo(cib, sizesByLevel, countsByLevel);
        }
      }
    }

    public void getIndexInfo(Map<Integer,Long> sizes, Map<Integer,Long> counts)
        throws IOException {
      getIndexInfo(rootBlock, sizes, counts);
    }

    private void printIndex(IndexBlock ib, String prefix, PrintStream out) throws IOException {
      List<IndexEntry> index = ib.getIndex();

      StringBuilder sb = new StringBuilder();
      sb.append(prefix);
      sb.append("Level: ");
      sb.append(ib.getLevel());

      int resetLen = sb.length();

      String recursePrefix = prefix + " ";

      for (IndexEntry ie : index) {
        sb.setLength(resetLen);
        sb.append(" Key: ");
        sb.append(ie.key);
        sb.append(" NumEntries: ");
        sb.append(ie.entries);
        sb.append(" Offset: ");
        sb.append(ie.offset);
        sb.append(" CompressedSize: ");
        sb.append(ie.compressedSize);
        sb.append(" RawSize : ");
        sb.append(ie.rawSize);

        out.println(sb);

        if (ib.getLevel() > 0) {
          IndexBlock cib = getIndexBlock(ie);
          printIndex(cib, recursePrefix, out);
        }
      }
    }

    public void printIndex(String prefix, PrintStream out) throws IOException {
      printIndex(rootBlock, prefix, out);
    }

    public Key getLastKey() {
      return rootBlock.getIndex().get(rootBlock.getIndex().size() - 1).getKey();
    }
  }
}
9,652
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/IndexIterator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

/**
 * Adapts an iterator over RFile index entries to the {@link SortedKeyValueIterator} interface,
 * exposing only the keys. Values, seeking, re-initialization, and deep copies are not supported
 * and throw {@link UnsupportedOperationException}.
 */
class IndexIterator implements SortedKeyValueIterator<Key,Value> {

  private final Iterator<IndexEntry> entrySource;

  // key of the current index entry; null once the source is exhausted
  private Key topKey;

  IndexIterator(Iterator<IndexEntry> indexIter) {
    this.entrySource = indexIter;
    advance();
  }

  // pull the next key from the underlying index, or mark exhaustion with null
  private void advance() {
    topKey = entrySource.hasNext() ? entrySource.next().getKey() : null;
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Key getTopKey() {
    return topKey;
  }

  @Override
  public Value getTopValue() {
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean hasTop() {
    return topKey != null;
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void next() throws IOException {
    advance();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    throw new UnsupportedOperationException();
  }
}
9,653
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import static java.util.Objects.requireNonNull; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import java.io.PrintStream; import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.accumulo.core.client.SampleNotPresentException; import org.apache.accumulo.core.client.sample.Sampler; import org.apache.accumulo.core.client.sample.SamplerConfiguration; import org.apache.accumulo.core.conf.DefaultConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.data.ArrayByteSequence; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Range; import 
org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.file.FileSKVIterator; import org.apache.accumulo.core.file.FileSKVWriter; import org.apache.accumulo.core.file.NoSuchMetaStoreException; import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile; import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder; import org.apache.accumulo.core.file.blockfile.impl.CacheProvider; import org.apache.accumulo.core.file.rfile.BlockIndex.BlockIndexEntry; import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry; import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader.IndexIterator; import org.apache.accumulo.core.file.rfile.RelativeKey.SkippR; import org.apache.accumulo.core.file.rfile.bcfile.BCFile; import org.apache.accumulo.core.file.rfile.bcfile.BCFile.Writer.BlockAppender; import org.apache.accumulo.core.file.rfile.bcfile.MetaBlockDoesNotExist; import org.apache.accumulo.core.iterators.IteratorEnvironment; import org.apache.accumulo.core.iterators.SortedKeyValueIterator; import org.apache.accumulo.core.iteratorsImpl.system.HeapIterator; import org.apache.accumulo.core.iteratorsImpl.system.InterruptibleIterator; import org.apache.accumulo.core.iteratorsImpl.system.IterationInterruptedException; import org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator; import org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator.LocalityGroup; import org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator.LocalityGroupContext; import org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator.LocalityGroupSeekCache; import org.apache.accumulo.core.metadata.TabletFile; import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl; import org.apache.accumulo.core.util.LocalityGroupUtil; import org.apache.accumulo.core.util.MutableByteSequence; import org.apache.commons.lang3.mutable.MutableLong; import org.apache.hadoop.io.Text; import 
org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

/**
 * RFile ("relative-encoding file") is Accumulo's sorted key/value file format. Data is organized
 * into locality groups, each with its own multi-level index; the nested {@code Writer} and
 * {@code Reader} types produce and consume the format. The {@code RINDEX_VER_*} constants below
 * record the format's version history.
 */
public class RFile {

  /** File name extension used for RFiles. */
  public static final String EXTENSION = "rf";

  private static final Logger log = LoggerFactory.getLogger(RFile.class);

  // Not instantiable: this class only holds constants and nested reader/writer types.
  private RFile() {}

  // Magic number written at the start of the serialized "RFile.index" meta block.
  private static final int RINDEX_MAGIC = 0x20637474;

  // Version 8: added sample storage. There is a sample locality group for each locality group.
  // Samples are built using a Sampler and sampler configuration. The Sampler and its
  // configuration are stored in the RFile; persisting the method of producing the sample allows
  // a user of RFile to determine if the sample is useful.
  //
  // Also selected smaller keys for the index by doing two things. First, internal stats were
  // used to look for keys that were below average in size for the index, and keys that were
  // statistically large were excluded from the index. Second, shorter keys (that may not exist
  // in data) were generated for the index.
  static final int RINDEX_VER_8 = 8;

  // Version 7: added support for prefix encoding and encryption. Before this change only exact
  // matches within a key field were deduped for consecutive keys. After this change, if
  // consecutive key fields have the same prefix then the prefix is only stored once.
  static final int RINDEX_VER_7 = 7;

  // Version 6: added support for multilevel indexes. Before this the index was one list with an
  // entry for each data block; for large files, a large index needed to be read into memory
  // before any seek could be done. After this change the index is a fat tree, and opening a
  // large rfile is much faster. Like the previous version of RFile, each index node in the tree
  // is kept in memory serialized and used in its serialized form.
  static final int RINDEX_VER_6 = 6;

  // static final int RINDEX_VER_5 = 5; // unreleased

  // Version 4: added support for seeking using serialized indexes. After this change the index
  // is no longer deserialized when the rfile is opened; the entire serialized index is read into
  // memory as a single byte array. For seeks, the serialized index is used to find blocks (the
  // binary search deserializes only the specific entries it needs). This resulted in less memory
  // usage (no object overhead) and faster open times for RFiles.
  static final int RINDEX_VER_4 = 4;

  // Version 3: initial released version of RFile. R is for relative encoding: a key is encoded
  // relative to the previous key, deduping key fields that were the same for consecutive keys
  // (a common occurrence in sorted data). This version supports locality groups; each locality
  // group has an index pointing to a set of data blocks, and each data block contains relatively
  // encoded keys and values.
  static final int RINDEX_VER_3 = 3;

  // Buffer sample data so that many sample data blocks are stored contiguously.
private static int sampleBufferSize = 10000000;

  @VisibleForTesting
  public static void setSampleBufferSize(int bufferSize) {
    sampleBufferSize = bufferSize;
  }

  /**
   * Metadata for one locality group: its name, column-family counts, first key, and the
   * multi-level index used to locate its data blocks. Serialized (via {@link Writable}) into the
   * "RFile.index" meta block; field order in readFields/write is format-critical.
   */
  private static class LocalityGroupMetadata implements Writable {

    // Only meaningful for format versions < 6, which addressed data blocks by block number.
    private int startBlock = -1;
    private Key firstKey;
    // Per-family entry counts; null means "too many families, no longer tracked" (default LG
    // only).
    private Map<ByteSequence,MutableLong> columnFamilies;

    private boolean isDefaultLG = false;
    private String name;
    // Families claimed by previously started named groups; used to validate the default group.
    private Set<ByteSequence> previousColumnFamilies;

    private MultiLevelIndex.BufferedWriter indexWriter;
    private MultiLevelIndex.Reader indexReader;
    private int version;

    // Read-side constructor: index entries are deserialized lazily from the block file.
    public LocalityGroupMetadata(int version, CachableBlockFile.Reader br) {
      columnFamilies = new HashMap<>();
      indexReader = new MultiLevelIndex.Reader(br, version);
      this.version = version;
    }

    // Write-side constructor for the default locality group, which receives every column family
    // not claimed by a named group.
    public LocalityGroupMetadata(Set<ByteSequence> pcf, int indexBlockSize, BCFile.Writer bfw) {
      isDefaultLG = true;
      columnFamilies = new HashMap<>();
      previousColumnFamilies = pcf;

      indexWriter =
          new MultiLevelIndex.BufferedWriter(new MultiLevelIndex.Writer(bfw, indexBlockSize));
    }

    // Write-side constructor for a named locality group restricted to the given column families.
    public LocalityGroupMetadata(String name, Set<ByteSequence> cfset, int indexBlockSize,
        BCFile.Writer bfw) {
      this.name = name;
      isDefaultLG = false;
      columnFamilies = new HashMap<>();
      for (ByteSequence cf : cfset) {
        columnFamilies.put(cf, new MutableLong(0));
      }

      indexWriter =
          new MultiLevelIndex.BufferedWriter(new MultiLevelIndex.Writer(bfw, indexBlockSize));
    }

    private Key getFirstKey() {
      return firstKey;
    }

    // May only be set once per locality group.
    private void setFirstKey(Key key) {
      if (firstKey != null) {
        throw new IllegalStateException();
      }
      this.firstKey = new Key(key);
    }

    /**
     * Updates per-column-family entry counts for an appended key, and rejects keys appended to
     * the default group whose family belongs to a previously started named group.
     */
    public void updateColumnCount(Key key) {

      if (isDefaultLG && columnFamilies == null) {
        if (!previousColumnFamilies.isEmpty()) {
          // only do this check when there are previous column families
          ByteSequence cf = key.getColumnFamilyData();
          if (previousColumnFamilies.contains(cf)) {
            throw new IllegalArgumentException("Added column family \"" + cf
                + "\" to default locality group that was in previous locality group");
          }
        }

        // no longer keeping track of column families, so return
        return;
      }

      ByteSequence cf = key.getColumnFamilyData();
      MutableLong count = columnFamilies.get(cf);

      if (count == null) {
        if (!isDefaultLG) {
          throw new IllegalArgumentException("invalid column family : " + cf);
        }

        if (previousColumnFamilies.contains(cf)) {
          throw new IllegalArgumentException("Added column family \"" + cf
              + "\" to default locality group that was in previous locality group");
        }

        if (columnFamilies.size() > Writer.MAX_CF_IN_DLG) {
          // stop keeping track, there are too many
          columnFamilies = null;
          return;
        }
        count = new MutableLong(0);
        // defensive copy of the family bytes, since the caller may reuse the key's buffers
        columnFamilies.put(new ArrayByteSequence(cf.getBackingArray(), cf.offset(), cf.length()),
            count);
      }

      count.increment();
    }

    @Override
    public void readFields(DataInput in) throws IOException {

      isDefaultLG = in.readBoolean();
      if (!isDefaultLG) {
        name = in.readUTF();
      }

      // older format versions persisted a starting block number
      if (version == RINDEX_VER_3 || version == RINDEX_VER_4 || version == RINDEX_VER_6
          || version == RINDEX_VER_7) {
        startBlock = in.readInt();
      }

      int size = in.readInt();

      if (size == -1) {
        // -1 marks a default LG that stopped tracking column families
        if (!isDefaultLG) {
          throw new IllegalStateException(
              "Non default LG " + name + " does not have column families");
        }

        columnFamilies = null;
      } else {
        if (columnFamilies == null) {
          columnFamilies = new HashMap<>();
        } else {
          columnFamilies.clear();
        }

        for (int i = 0; i < size; i++) {
          int len = in.readInt();
          byte[] cf = new byte[len];
          in.readFully(cf);
          long count = in.readLong();

          columnFamilies.put(new ArrayByteSequence(cf), new MutableLong(count));
        }
      }

      if (in.readBoolean()) {
        firstKey = new Key();
        firstKey.readFields(in);
      } else {
        firstKey = null;
      }

      indexReader.readFields(in);
    }

    @Override
    public void write(DataOutput out) throws IOException {

      out.writeBoolean(isDefaultLG);
      if (!isDefaultLG) {
        out.writeUTF(name);
      }

      if (isDefaultLG && columnFamilies == null) {
        // only expect null when default LG, otherwise let a NPE occur
        out.writeInt(-1);
      } else {
        out.writeInt(columnFamilies.size());

        for (Entry<ByteSequence,MutableLong> entry : columnFamilies.entrySet()) {
          out.writeInt(entry.getKey().length());
          out.write(entry.getKey().getBackingArray(), entry.getKey().offset(),
              entry.getKey().length());
          out.writeLong(entry.getValue().longValue());
        }
      }

      out.writeBoolean(firstKey != null);
      if (firstKey != null) {
        firstKey.write(out);
      }

      indexWriter.close(out);
    }

    /** Prints human-readable locality group info to stdout (used by the rfile-info tooling). */
    public void printInfo(boolean isSample, boolean includeIndexDetails) throws IOException {
      PrintStream out = System.out;
      out.printf("%-24s : %s\n", (isSample ? "Sample " : "") + "Locality group ",
          (isDefaultLG ? "<DEFAULT>" : name));
      if (version == RINDEX_VER_3 || version == RINDEX_VER_4 || version == RINDEX_VER_6
          || version == RINDEX_VER_7) {
        out.printf("\t%-22s : %d\n", "Start block", startBlock);
      }
      out.printf("\t%-22s : %,d\n", "Num blocks", indexReader.size());
      TreeMap<Integer,Long> sizesByLevel = new TreeMap<>();
      TreeMap<Integer,Long> countsByLevel = new TreeMap<>();
      indexReader.getIndexInfo(sizesByLevel, countsByLevel);
      for (Entry<Integer,Long> entry : sizesByLevel.descendingMap().entrySet()) {
        out.printf("\t%-22s : %,d bytes  %,d blocks\n", "Index level " + entry.getKey(),
            entry.getValue(), countsByLevel.get(entry.getKey()));
      }
      out.printf("\t%-22s : %s\n", "First key", firstKey);

      Key lastKey = null;
      if (indexReader.size() > 0) {
        lastKey = indexReader.getLastKey();
      }
      out.printf("\t%-22s : %s\n", "Last key", lastKey);

      long numKeys = 0;
      IndexIterator countIter = indexReader.lookup(new Key());
      while (countIter.hasNext()) {
        IndexEntry indexEntry = countIter.next();
        numKeys += indexEntry.getNumEntries();
      }

      out.printf("\t%-22s : %,d\n", "Num entries", numKeys);
      out.printf("\t%-22s : %s\n", "Column families",
          (isDefaultLG && columnFamilies == null ? "<UNKNOWN>" : columnFamilies.keySet()));

      if (includeIndexDetails) {
        out.printf("\t%-22s :\nIndex Entries", lastKey);
        String prefix = "\t   ";
        indexReader.printIndex(prefix, out);
      }
    }

  }

  /** A key/value pair buffered for the sample locality group; copies both key and value. */
  private static class SampleEntry {
    Key key;
    Value val;

    SampleEntry(Key key, Value val) {
      this.key = new Key(key);
      this.val = new Value(val);
    }
  }

  /**
   * Buffers sampled key/values and writes them through a nested {@link LocalityGroupWriter} so
   * that many sample data blocks end up stored contiguously in the file.
   */
  private static class SampleLocalityGroupWriter {

    private Sampler sampler;

    private List<SampleEntry> entries = new ArrayList<>();
    private long dataSize = 0; // total key+value bytes currently buffered

    private LocalityGroupWriter lgr;

    public SampleLocalityGroupWriter(LocalityGroupWriter lgr, Sampler sampler) {
      this.lgr = lgr;
      this.sampler = sampler;
    }

    // Buffer the entry only if the sampler selects it.
    public void append(Key key, Value value) {
      if (sampler.accept(key)) {
        entries.add(new SampleEntry(key, value));
        dataSize += key.getSize() + value.getSize();
      }
    }

    // Flush all remaining buffered entries and close the underlying writer.
    public void close() throws IOException {
      for (SampleEntry se : entries) {
        lgr.append(se.key, se.val);
      }

      lgr.close();
    }

    // Spill buffered sample data once it exceeds sampleBufferSize.
    public void flushIfNeeded() throws IOException {
      if (dataSize > sampleBufferSize) {
        // the reason to write out all but one key is so that closeBlock() can always eventually
        // be called with true
        List<SampleEntry> subList = entries.subList(0, entries.size() - 1);

        if (!subList.isEmpty()) {
          for (SampleEntry se : subList) {
            lgr.append(se.key, se.val);
          }

          lgr.closeBlock(subList.get(subList.size() - 1).key, false);

          subList.clear();
          dataSize = 0;
        }
      }
    }
  }

  /**
   * Writes one locality group's data blocks and maintains its index, choosing short index keys
   * and enforcing block-size limits.
   */
  private static class LocalityGroupWriter {

    private BCFile.Writer fileWriter;
    private BlockAppender blockWriter; // null when no data block is currently open
    private final long blockSize; // target (soft) data block size
    private final long maxBlockSize; // hard limit before forcing a block close
    private int entries = 0; // entries written to the current block

    private LocalityGroupMetadata currentLocalityGroup = null;

    private Key lastKeyInBlock = null;

    private Key prevKey = new Key();

    private SampleLocalityGroupWriter sample;

    // Use windowed stats to fix ACCUMULO-4669
    private RollingStats keyLenStats = new RollingStats(2017);
    private double averageKeySize = 0;

    LocalityGroupWriter(BCFile.Writer fileWriter, long blockSize, long maxBlockSize,
        LocalityGroupMetadata currentLocalityGroup,
SampleLocalityGroupWriter sample) {
      this.fileWriter = fileWriter;
      this.blockSize = blockSize;
      this.maxBlockSize = maxBlockSize;
      this.currentLocalityGroup = currentLocalityGroup;
      this.sample = sample;
    }

    // A key is "giant" when it is far above the rolling mean key length; such keys are kept out
    // of the index.
    private boolean isGiantKey(Key k) {
      double mean = keyLenStats.getMean();
      double stddev = keyLenStats.getStandardDeviation();
      return k.getSize() > mean + Math.max(9 * mean, 4 * stddev);
    }

    /**
     * Appends a key/value in sorted order, opening/closing data blocks as size thresholds are
     * crossed and feeding the sample writer when sampling is enabled.
     *
     * @throws IllegalArgumentException if the key sorts before the previously appended key
     */
    public void append(Key key, Value value) throws IOException {

      if (key.compareTo(prevKey) < 0) {
        throw new IllegalArgumentException(
            "Keys appended out-of-order.  New key " + key + ", previous key " + prevKey);
      }

      currentLocalityGroup.updateColumnCount(key);

      if (currentLocalityGroup.getFirstKey() == null) {
        currentLocalityGroup.setFirstKey(key);
      }

      if (sample != null) {
        sample.append(key, value);
      }

      if (blockWriter == null) {
        blockWriter = fileWriter.prepareDataBlock();
      } else if (blockWriter.getRawSize() > blockSize) {

        // Look for a key that's short to put in the index, defining short as average or below.
        if (averageKeySize == 0) {
          // use the same average for the search for a below average key for a block
          averageKeySize = keyLenStats.getMean();
        }

        // Possibly produce a shorter key that does not exist in data. Even if a key can be
        // shortened, it may not be below average.
        Key closeKey = KeyShortener.shorten(prevKey, key);

        if ((closeKey.getSize() <= averageKeySize || blockWriter.getRawSize() > maxBlockSize)
            && !isGiantKey(closeKey)) {
          closeBlock(closeKey, false);
          blockWriter = fileWriter.prepareDataBlock();
          // set average to zero so its recomputed for the next block
          averageKeySize = 0;
          // To constrain the growth of data blocks, we limit our worst case scenarios to closing
          // blocks if they reach the maximum configurable block size of Integer.MAX_VALUE.
          // 128 bytes added for metadata overhead
        } else if (((long) key.getSize() + (long) value.getSize() + blockWriter.getRawSize()
            + 128L) >= Integer.MAX_VALUE) {
          closeBlock(closeKey, false);
          blockWriter = fileWriter.prepareDataBlock();
          averageKeySize = 0;

        }

      }

      RelativeKey rk = new RelativeKey(lastKeyInBlock, key);

      rk.write(blockWriter);
      value.write(blockWriter);
      entries++;

      keyLenStats.addValue(key.getSize());

      prevKey = new Key(key);
      lastKeyInBlock = prevKey;

    }

    // Close the open data block and record it in the index; key is the index key for the block.
    private void closeBlock(Key key, boolean lastBlock) throws IOException {
      blockWriter.close();

      if (lastBlock) {
        currentLocalityGroup.indexWriter.addLast(key, entries, blockWriter.getStartPos(),
            blockWriter.getCompressedSize(), blockWriter.getRawSize());
      } else {
        currentLocalityGroup.indexWriter.add(key, entries, blockWriter.getStartPos(),
            blockWriter.getCompressedSize(), blockWriter.getRawSize());
      }

      if (sample != null) {
        sample.flushIfNeeded();
      }

      blockWriter = null;
      lastKeyInBlock = null;
      entries = 0;
    }

    public void close() throws IOException {
      if (blockWriter != null) {
        closeBlock(lastKeyInBlock, true);
      }

      if (sample != null) {
        sample.close();
      }
    }
  }

  /**
   * Writes a complete RFile: one or more named locality groups followed by an optional default
   * group, then the serialized "RFile.index" meta block (magic, version, group metadata, and
   * sampler configuration when sampling is enabled).
   */
  public static class Writer implements FileSKVWriter {

    // Max number of column families to track in the default locality group before giving up.
    public static final int MAX_CF_IN_DLG = 1000;
    // Data blocks may grow to blockSize * this multiplier before a close is forced.
    private static final double MAX_BLOCK_MULTIPLIER = 1.1;

    private BCFile.Writer fileWriter;

    private final long blockSize;
    private final long maxBlockSize;
    private final int indexBlockSize;

    private ArrayList<LocalityGroupMetadata> localityGroups = new ArrayList<>();
    private ArrayList<LocalityGroupMetadata> sampleGroups = new ArrayList<>();
    private LocalityGroupMetadata currentLocalityGroup = null;
    private LocalityGroupMetadata sampleLocalityGroup = null;

    private boolean dataClosed = false;
    private boolean closed = false;
    private boolean startedDefaultLocalityGroup = false;

    private HashSet<ByteSequence> previousColumnFamilies;
    private long length = -1; // file length, valid only after close()

    private LocalityGroupWriter lgWriter;

    private SamplerConfigurationImpl samplerConfig;
    private Sampler sampler;

    public Writer(BCFile.Writer bfw, int blockSize) throws IOException {
      this(bfw, blockSize, (int) DefaultConfiguration.getInstance()
          .getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX), null, null);
    }

    public Writer(BCFile.Writer bfw, int blockSize, int indexBlockSize,
        SamplerConfigurationImpl samplerConfig, Sampler sampler) {
      this.blockSize = blockSize;
      this.maxBlockSize = (long) (blockSize * MAX_BLOCK_MULTIPLIER);
      this.indexBlockSize = indexBlockSize;
      this.fileWriter = bfw;
      previousColumnFamilies = new HashSet<>();
      this.samplerConfig = samplerConfig;
      this.sampler = sampler;
    }

    // Idempotent: flushes data, writes the index meta block, and closes the underlying file.
    @Override
    public synchronized void close() throws IOException {

      if (closed) {
        return;
      }

      closeData();

      BlockAppender mba = fileWriter.prepareMetaBlock("RFile.index");

      mba.writeInt(RINDEX_MAGIC);
      mba.writeInt(RINDEX_VER_8);

      if (currentLocalityGroup != null) {
        localityGroups.add(currentLocalityGroup);
        sampleGroups.add(sampleLocalityGroup);
      }

      mba.writeInt(localityGroups.size());

      for (LocalityGroupMetadata lc : localityGroups) {
        lc.write(mba);
      }

      if (samplerConfig == null) {
        mba.writeBoolean(false);
      } else {
        mba.writeBoolean(true);

        for (LocalityGroupMetadata lc : sampleGroups) {
          lc.write(mba);
        }

        samplerConfig.write(mba);
      }

      mba.close();
      fileWriter.close();
      length = fileWriter.getLength();

      closed = true;
    }

    private void closeData() throws IOException {

      if (dataClosed) {
        return;
      }

      dataClosed = true;

      if (lgWriter != null) {
        // close last locality group
        lgWriter.close();
      }
    }

    @Override
    public void append(Key key, Value value) throws IOException {

      if (dataClosed) {
        throw new IllegalStateException("Cannot append, data closed");
      }

      lgWriter.append(key, value);
    }

    // Meta blocks may only be created after all key/value data has been written.
    @Override
    public DataOutputStream createMetaStore(String name) throws IOException {
      closeData();

      return fileWriter.prepareMetaBlock(name);
    }

    // Shared implementation for starting a named group (columnFamilies != null) or the default
    // group (columnFamilies == null). Closes the previous group's writer first.
    private void _startNewLocalityGroup(String name, Set<ByteSequence> columnFamilies)
        throws IOException {

      if (dataClosed) {
        throw new IllegalStateException("data closed");
      }

      if (startedDefaultLocalityGroup) {
        throw new IllegalStateException(
            "Can not start anymore new locality groups after default locality group started");
      }

      if (lgWriter != null) {
        // close last locality group
        lgWriter.close();
      }

      if (currentLocalityGroup != null) {
        localityGroups.add(currentLocalityGroup);
        sampleGroups.add(sampleLocalityGroup);
      }

      if (columnFamilies == null) {
        startedDefaultLocalityGroup = true;
        currentLocalityGroup =
            new LocalityGroupMetadata(previousColumnFamilies, indexBlockSize, fileWriter);
        sampleLocalityGroup =
            new LocalityGroupMetadata(previousColumnFamilies, indexBlockSize, fileWriter);
      } else {
        if (!Collections.disjoint(columnFamilies, previousColumnFamilies)) {
          HashSet<ByteSequence> overlap = new HashSet<>(columnFamilies);
          overlap.retainAll(previousColumnFamilies);
          throw new IllegalArgumentException(
              "Column families over lap with previous locality group : " + overlap);
        }
        currentLocalityGroup =
            new LocalityGroupMetadata(name, columnFamilies, indexBlockSize, fileWriter);
        sampleLocalityGroup =
            new LocalityGroupMetadata(name, columnFamilies, indexBlockSize, fileWriter);
        previousColumnFamilies.addAll(columnFamilies);
      }

      SampleLocalityGroupWriter sampleWriter = null;
      if (sampler != null) {
        sampleWriter = new SampleLocalityGroupWriter(
            new LocalityGroupWriter(fileWriter, blockSize, maxBlockSize, sampleLocalityGroup,
                null),
            sampler);
      }
      lgWriter = new LocalityGroupWriter(fileWriter, blockSize, maxBlockSize,
          currentLocalityGroup, sampleWriter);
    }

    @Override
    public void startNewLocalityGroup(String name, Set<ByteSequence> columnFamilies)
        throws IOException {
      if (columnFamilies == null) {
        throw new NullPointerException();
      }

      _startNewLocalityGroup(name, columnFamilies);
    }

    @Override
    public void startDefaultLocalityGroup() throws IOException {
      _startNewLocalityGroup(null, null);
    }

    @Override
    public boolean supportsLocalityGroups() {
      return true;
    }

    @Override
    public long getLength() {
      if (!closed) {
        return fileWriter.getLength();
      }
      return length;
    }
  }

  /**
   * Iterator over a single locality group: walks index entries, reads data blocks, and supports
   * optimized re-seeks within the current block.
   */
  private static class LocalityGroupReader extends LocalityGroup implements FileSKVIterator {

    private
CachableBlockFile.Reader reader;
    private MultiLevelIndex.Reader index;
    private int blockCount;
    private Key firstKey;
    private int startBlock; // only used for format versions < 6 (block-number addressing)
    private boolean closed = false;
    private int version;
    // When false, every key in the current block is known to be within the range's end bound.
    private boolean checkRange = true;

    private LocalityGroupReader(CachableBlockFile.Reader reader, LocalityGroupMetadata lgm,
        int version) {
      super(lgm.columnFamilies, lgm.isDefaultLG);
      this.firstKey = lgm.firstKey;
      this.index = lgm.indexReader;
      this.startBlock = lgm.startBlock;
      blockCount = index.size();
      this.version = version;

      this.reader = reader;

    }

    // Copy constructor used for deep copies; shares the index and block reader.
    public LocalityGroupReader(LocalityGroupReader lgr) {
      super(lgr.columnFamilies, lgr.isDefaultLocalityGroup);
      this.firstKey = lgr.firstKey;
      this.index = lgr.index;
      this.startBlock = lgr.startBlock;
      this.blockCount = lgr.blockCount;
      this.reader = lgr.reader;
      this.version = lgr.version;
    }

    Iterator<IndexEntry> getIndex() throws IOException {
      return index.lookup(new Key());
    }

    @Override
    public void close() throws IOException {
      closed = true;
      hasTop = false;
      if (currBlock != null) {
        currBlock.close();
      }

    }

    private IndexIterator iiter;
    private int entriesLeft; // entries remaining in the current data block
    private CachableBlockFile.CachedBlockRead currBlock;
    private RelativeKey rk;
    private Value val;
    private Key prevKey = null;
    private Range range = null;
    private boolean hasTop = false;
    private AtomicBoolean interruptFlag;

    @Override
    public Key getTopKey() {
      return rk.getKey();
    }

    @Override
    public Value getTopValue() {
      return val;
    }

    @Override
    public boolean hasTop() {
      return hasTop;
    }

    @Override
    public void next() throws IOException {
      try {
        _next();
      } catch (IOException | RuntimeException ioe) {
        // failure invalidates this reader's position; reset and close underlying resources
        reset(true);
        throw ioe;
      }
    }

    // Advance one entry, moving to the next data block when the current one is exhausted.
    private void _next() throws IOException {

      if (!hasTop) {
        throw new IllegalStateException();
      }

      if (entriesLeft == 0) {
        currBlock.close();
        if (metricsGatherer != null) {
          metricsGatherer.startBlock();
        }

        if (iiter.hasNext()) {
          IndexEntry indexEntry = iiter.next();
          entriesLeft = indexEntry.getNumEntries();
          currBlock = getDataBlock(indexEntry);

          checkRange = range.afterEndKey(indexEntry.getKey());
          if (!checkRange) {
            hasTop = true;
          }

        } else {
          rk = null;
          val = null;
          hasTop = false;
          return;
        }
      }

      prevKey = rk.getKey();
      rk.readFields(currBlock);
      val.readFields(currBlock);

      if (metricsGatherer != null) {
        metricsGatherer.addMetric(rk.getKey(), val);
      }

      entriesLeft--;
      if (checkRange) {
        hasTop = !range.afterEndKey(rk.getKey());
      }
    }

    // Fetch the data block for an index entry; versions < 6 address blocks by number, newer
    // versions by offset/size.
    private CachableBlockFile.CachedBlockRead getDataBlock(IndexEntry indexEntry)
        throws IOException {
      if (interruptFlag != null && interruptFlag.get()) {
        throw new IterationInterruptedException();
      }

      if (version == RINDEX_VER_3 || version == RINDEX_VER_4) {
        return reader.getDataBlock(startBlock + iiter.previousIndex());
      } else {
        return reader.getDataBlock(indexEntry.getOffset(), indexEntry.getCompressedSize(),
            indexEntry.getRawSize());
      }

    }

    @Override
    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
        throws IOException {

      if (closed) {
        throw new IllegalStateException("Locality group reader closed");
      }

      // column family filtering happens a level above, in LocalityGroupIterator
      if (!columnFamilies.isEmpty() || inclusive) {
        throw new IllegalArgumentException("I do not know how to filter column families");
      }

      if (interruptFlag != null && interruptFlag.get()) {
        throw new IterationInterruptedException();
      }

      try {
        _seek(range);
      } catch (IOException | RuntimeException ioe) {
        reset(true);
        throw ioe;
      }
    }

    private void reset(boolean exceptionThrown) {
      rk = null;
      hasTop = false;
      if (currBlock != null) {
        try {
          try {
            currBlock.close();
            if (exceptionThrown) {
              reader.close();
            }
          } catch (IOException e) {
            log.warn("Failed to close block reader", e);
          }
        } finally {
          currBlock = null;
        }
      }
    }

    // Position the reader at the first key in range, avoiding a full index lookup when the
    // current position can already satisfy (or fast-forward to) the request.
    private void _seek(Range range) throws IOException {

      this.range = range;
      this.checkRange = true;

      if (blockCount == 0) {
        // its an empty file
        rk = null;
        return;
      }

      Key startKey = range.getStartKey();
      if (startKey == null) {
        startKey = new Key();
      }

      boolean reseek = true;

      if (range.afterEndKey(firstKey)) {
        // range is before first key in rfile, so there is nothing to do
        reset(false);
        reseek = false;
      }

      if (rk != null) {
        if (range.beforeStartKey(prevKey) && range.afterEndKey(getTopKey())) {
          // range is between the two keys in the file where the last range seeked to stopped, so
          // there is nothing to do
          reseek = false;
        }

        if (startKey.compareTo(getTopKey()) <= 0 && startKey.compareTo(prevKey) > 0) {
          // current location in file can satisfy this request, no need to seek
          reseek = false;
        }

        if (entriesLeft > 0 && startKey.compareTo(getTopKey()) >= 0
            && startKey.compareTo(iiter.peekPrevious().getKey()) <= 0) {
          // start key is within the unconsumed portion of the current block

          // this code intentionally does not use the index associated with a cached block
          // because if only forward seeks are being done, then there is no benefit to building
          // and index for the block... could consider using the index if it exist but not
          // causing the build of an index... doing this could slow down some use cases and
          // and speed up others.

          MutableByteSequence valbs = new MutableByteSequence(new byte[64], 0, 0);
          SkippR skippr = RelativeKey.fastSkip(currBlock, startKey, valbs, prevKey, getTopKey(),
              entriesLeft);
          if (skippr.skipped > 0) {
            entriesLeft -= skippr.skipped;
            val = new Value(valbs.toArray());
            prevKey = skippr.prevKey;
            rk = skippr.rk;
          }

          reseek = false;
        }

        if (entriesLeft == 0 && startKey.compareTo(getTopKey()) > 0
            && startKey.compareTo(iiter.peekPrevious().getKey()) <= 0) {
          // In the empty space at the end of a block. This can occur when keys are shortened in
          // the index creating index entries that do not exist in the block. These shortened
          // index entries fall between the last key in a block and first key in the next block,
          // but may not exist in the data. Just proceed to the next block.
          reseek = false;
        }

        if (iiter.previousIndex() == 0 && getTopKey().equals(firstKey)
            && startKey.compareTo(firstKey) <= 0) {
          // seeking before the beginning of the file, and already positioned at the first key in
          // the file so there is nothing to do
          reseek = false;
        }
      }

      if (reseek) {
        iiter = index.lookup(startKey);

        reset(false);

        if (iiter.hasNext()) {

          // if the index contains the same key multiple times, then go to the
          // earliest index entry containing the key
          while (iiter.hasPrevious()
              && iiter.peekPrevious().getKey().equals(iiter.peek().getKey())) {
            iiter.previous();
          }

          if (iiter.hasPrevious()) {
            // initially prevKey is the last key of the prev block
            prevKey = new Key(iiter.peekPrevious().getKey());
          } else {
            prevKey = new Key(); // first block in the file, so set prev key to minimal key
          }

          IndexEntry indexEntry = iiter.next();
          entriesLeft = indexEntry.getNumEntries();
          currBlock = getDataBlock(indexEntry);

          checkRange = range.afterEndKey(indexEntry.getKey());
          if (!checkRange) {
            hasTop = true;
          }

          MutableByteSequence valbs = new MutableByteSequence(new byte[64], 0, 0);

          Key currKey = null;

          if (currBlock.isIndexable()) {
            BlockIndex blockIndex = BlockIndex.getIndex(currBlock, indexEntry);
            if (blockIndex != null) {
              BlockIndexEntry bie = blockIndex.seekBlock(startKey, currBlock);
              if (bie != null) {
                // we are seeked to the current position of the key in the index
                // need to prime the read process and read this key from the block
                RelativeKey tmpRk = new RelativeKey();
                tmpRk.setPrevKey(bie.getPrevKey());
                tmpRk.readFields(currBlock);
                val = new Value();

                val.readFields(currBlock);
                valbs = new MutableByteSequence(val.get(), 0, val.getSize());

                // just consumed one key from the input stream, so subtract one from entries left
                entriesLeft = bie.getEntriesLeft() - 1;
                prevKey = new Key(bie.getPrevKey());
                currKey = tmpRk.getKey();
              }
            }
          }

          SkippR skippr =
              RelativeKey.fastSkip(currBlock, startKey, valbs, prevKey, currKey, entriesLeft);
          prevKey = skippr.prevKey;
          entriesLeft -= skippr.skipped;
          val = new Value(valbs.toArray());
          // set rk when everything above is successful, if exception
          // occurs rk will not be set
          rk = skippr.rk;
        } else {
          // past the last key
        }
      }

      hasTop = rk != null && !range.afterEndKey(rk.getKey());

      while (hasTop() && range.beforeStartKey(getTopKey())) {
        next();
      }

      if (metricsGatherer != null) {
        metricsGatherer.startLocalityGroup(rk.getKey().getColumnFamily());
        metricsGatherer.addMetric(rk.getKey(), val);
      }
    }

    @Override
    public Text getFirstRow() {
      return firstKey != null ? firstKey.getRow() : null;
    }

    @Override
    public Text getLastRow() {
      if (index.size() == 0) {
        return null;
      }
      return index.getLastKey().getRow();
    }

    @Override
    public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void closeDeepCopies() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) {
      throw new UnsupportedOperationException();
    }

    @Override
    public DataInputStream getMetaStore(String name) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setInterruptFlag(AtomicBoolean flag) {
      this.interruptFlag = flag;
    }

    @Override
    public InterruptibleIterator getIterator() {
      return this;
    }

    private MetricsGatherer<?> metricsGatherer;

    public void registerMetrics(MetricsGatherer<?> vmg) {
      metricsGatherer = vmg;
    }

    @Override
    public FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig) {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setCacheProvider(CacheProvider cacheProvider) {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * Reads a complete RFile: deserializes the "RFile.index" meta block, builds a
   * LocalityGroupReader per locality group (and per sample group for version 8 files with
   * sampling), and merges them via HeapIterator.
   */
  public static class Reader extends HeapIterator implements RFileSKVIterator {

    private final CachableBlockFile.Reader reader;

    private final ArrayList<LocalityGroupMetadata> localityGroups = new ArrayList<>();
    private final ArrayList<LocalityGroupMetadata> sampleGroups = new ArrayList<>();

    private final
LocalityGroupReader[] currentReaders; private final LocalityGroupReader[] readers; private final LocalityGroupReader[] sampleReaders; private final LocalityGroupContext lgContext; private LocalityGroupSeekCache lgCache; private List<Reader> deepCopies; private boolean deepCopy = false; private AtomicBoolean interruptFlag; private SamplerConfigurationImpl samplerConfig = null; private int rfileVersion; public Reader(CachableBlockFile.Reader rdr) throws IOException { this.reader = rdr; try (CachableBlockFile.CachedBlockRead mb = reader.getMetaBlock("RFile.index")) { int magic = mb.readInt(); int ver = mb.readInt(); rfileVersion = ver; if (magic != RINDEX_MAGIC) { throw new IOException("Did not see expected magic number, saw " + magic); } if (ver != RINDEX_VER_8 && ver != RINDEX_VER_7 && ver != RINDEX_VER_6 && ver != RINDEX_VER_4 && ver != RINDEX_VER_3) { throw new IOException("Did not see expected version, saw " + ver); } int size = mb.readInt(); currentReaders = new LocalityGroupReader[size]; deepCopies = new LinkedList<>(); for (int i = 0; i < size; i++) { LocalityGroupMetadata lgm = new LocalityGroupMetadata(ver, rdr); lgm.readFields(mb); localityGroups.add(lgm); currentReaders[i] = new LocalityGroupReader(reader, lgm, ver); } readers = currentReaders; if (ver == RINDEX_VER_8 && mb.readBoolean()) { sampleReaders = new LocalityGroupReader[size]; for (int i = 0; i < size; i++) { LocalityGroupMetadata lgm = new LocalityGroupMetadata(ver, rdr); lgm.readFields(mb); sampleGroups.add(lgm); sampleReaders[i] = new LocalityGroupReader(reader, lgm, ver); } samplerConfig = new SamplerConfigurationImpl(mb); } else { sampleReaders = null; samplerConfig = null; } } lgContext = new LocalityGroupContext(currentReaders); createHeap(currentReaders.length); } private Reader(Reader r, LocalityGroupReader[] sampleReaders) { super(sampleReaders.length); this.reader = r.reader; this.currentReaders = new LocalityGroupReader[sampleReaders.length]; this.deepCopies = r.deepCopies; 
this.deepCopy = false; this.readers = r.readers; this.sampleReaders = r.sampleReaders; this.samplerConfig = r.samplerConfig; this.rfileVersion = r.rfileVersion; for (int i = 0; i < sampleReaders.length; i++) { this.currentReaders[i] = sampleReaders[i]; this.currentReaders[i].setInterruptFlag(r.interruptFlag); } this.lgContext = new LocalityGroupContext(currentReaders); } private Reader(Reader r, boolean useSample) { super(r.currentReaders.length); this.reader = r.reader; this.currentReaders = new LocalityGroupReader[r.currentReaders.length]; this.deepCopies = r.deepCopies; this.deepCopy = true; this.samplerConfig = r.samplerConfig; this.rfileVersion = r.rfileVersion; this.readers = r.readers; this.sampleReaders = r.sampleReaders; for (int i = 0; i < r.readers.length; i++) { if (useSample) { this.currentReaders[i] = new LocalityGroupReader(r.sampleReaders[i]); this.currentReaders[i].setInterruptFlag(r.interruptFlag); } else { this.currentReaders[i] = new LocalityGroupReader(r.readers[i]); this.currentReaders[i].setInterruptFlag(r.interruptFlag); } } this.lgContext = new LocalityGroupContext(currentReaders); } public Reader(CachableBlockFile.CachableBuilder b) throws IOException { this(new CachableBlockFile.Reader(b)); } private void closeLocalityGroupReaders() { for (LocalityGroupReader lgr : currentReaders) { try { lgr.close(); } catch (IOException e) { log.warn("Errored out attempting to close LocalityGroupReader.", e); } } } @Override public void closeDeepCopies() { if (deepCopy) { throw new IllegalStateException("Calling closeDeepCopies on a deep copy is not supported"); } for (Reader deepCopy : deepCopies) { deepCopy.closeLocalityGroupReaders(); } deepCopies.clear(); } @Override public void close() throws IOException { if (deepCopy) { throw new IllegalStateException("Calling close on a deep copy is not supported"); } closeDeepCopies(); closeLocalityGroupReaders(); if (sampleReaders != null) { for (LocalityGroupReader lgr : sampleReaders) { try { lgr.close(); } 
catch (IOException e) { log.warn("Errored out attempting to close LocalityGroupReader.", e); } } } try { reader.close(); } finally { /** * input Stream is passed to CachableBlockFile and closed there */ } } @Override public Text getFirstRow() throws IOException { if (currentReaders.length == 0) { return null; } Text minRow = null; for (LocalityGroupReader currentReader : currentReaders) { if (minRow == null) { minRow = currentReader.getFirstRow(); } else { Text firstRow = currentReader.getFirstRow(); if (firstRow != null && firstRow.compareTo(minRow) < 0) { minRow = firstRow; } } } return minRow; } @Override public Text getLastRow() throws IOException { if (currentReaders.length == 0) { return null; } Text maxRow = null; for (LocalityGroupReader currentReader : currentReaders) { if (maxRow == null) { maxRow = currentReader.getLastRow(); } else { Text lastRow = currentReader.getLastRow(); if (lastRow != null && lastRow.compareTo(maxRow) > 0) { maxRow = lastRow; } } } return maxRow; } @Override public DataInputStream getMetaStore(String name) throws IOException, NoSuchMetaStoreException { try { return this.reader.getMetaBlock(name); } catch (MetaBlockDoesNotExist e) { throw new NoSuchMetaStoreException("name = " + name, e); } } @Override public Reader deepCopy(IteratorEnvironment env) { if (env != null && env.isSamplingEnabled()) { SamplerConfiguration sc = env.getSamplerConfiguration(); if (sc == null) { throw new SampleNotPresentException(); } if (this.samplerConfig != null && this.samplerConfig.equals(new SamplerConfigurationImpl(sc))) { Reader copy = new Reader(this, true); copy.setInterruptFlagInternal(interruptFlag); deepCopies.add(copy); return copy; } else { throw new SampleNotPresentException(); } } else { Reader copy = new Reader(this, false); copy.setInterruptFlagInternal(interruptFlag); deepCopies.add(copy); return copy; } } @Override public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) { throw 
new UnsupportedOperationException(); } /** * @return map of locality group names to column families. The default locality group will have * {@code null} for a name. RFile will only track up to {@value Writer#MAX_CF_IN_DLG} * families for the default locality group. After this it will stop tracking. For the * case where the default group has more thn {@value Writer#MAX_CF_IN_DLG} families an * empty list of families is returned. * @see LocalityGroupUtil#seek(FileSKVIterator, Range, String, Map) */ public Map<String,ArrayList<ByteSequence>> getLocalityGroupCF() { Map<String,ArrayList<ByteSequence>> cf = new HashMap<>(); for (LocalityGroupMetadata lcg : localityGroups) { ArrayList<ByteSequence> setCF; if (lcg.columnFamilies == null) { Preconditions.checkState(lcg.isDefaultLG, "Group %s has null families. " + "Only expect default locality group to have null families.", lcg.name); setCF = new ArrayList<>(); } else { setCF = new ArrayList<>(lcg.columnFamilies.keySet()); } cf.put(lcg.name, setCF); } return cf; } /** * Method that registers the given MetricsGatherer. You can only register one as it will clobber * any previously set. The MetricsGatherer should be registered before iterating through the * LocalityGroups. * * @param vmg MetricsGatherer to be registered with the LocalityGroupReaders */ public void registerMetrics(MetricsGatherer<?> vmg) { vmg.init(getLocalityGroupCF()); for (LocalityGroupReader lgr : currentReaders) { lgr.registerMetrics(vmg); } if (sampleReaders != null) { for (LocalityGroupReader lgr : sampleReaders) { lgr.registerMetrics(vmg); } } } @Override public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException { lgCache = LocalityGroupIterator.seek(this, lgContext, range, columnFamilies, inclusive, lgCache); } int getNumLocalityGroupsSeeked() { return (lgCache == null ? 
0 : lgCache.getNumLGSeeked()); } @Override public FileSKVIterator getIndex() throws IOException { ArrayList<Iterator<IndexEntry>> indexes = new ArrayList<>(); for (LocalityGroupReader lgr : currentReaders) { indexes.add(lgr.getIndex()); } return new MultiIndexIterator(this, indexes); } @Override public Reader getSample(SamplerConfigurationImpl sampleConfig) { requireNonNull(sampleConfig); if (this.samplerConfig != null && this.samplerConfig.equals(sampleConfig)) { Reader copy = new Reader(this, sampleReaders); copy.setInterruptFlagInternal(interruptFlag); return copy; } return null; } // only visible for printinfo FileSKVIterator getSample() { if (samplerConfig == null) { return null; } return getSample(this.samplerConfig); } public void printInfo(boolean includeIndexDetails) throws IOException { System.out.printf("%-24s : %d\n", "RFile Version", rfileVersion); System.out.println(); for (LocalityGroupMetadata lgm : localityGroups) { lgm.printInfo(false, includeIndexDetails); } if (!sampleGroups.isEmpty()) { System.out.println(); System.out.printf("%-24s :\n", "Sample Configuration"); System.out.printf("\t%-22s : %s\n", "Sampler class ", samplerConfig.getClassName()); System.out.printf("\t%-22s : %s\n", "Sampler options ", samplerConfig.getOptions()); System.out.println(); for (LocalityGroupMetadata lgm : sampleGroups) { lgm.printInfo(true, includeIndexDetails); } } } @Override public void setInterruptFlag(AtomicBoolean flag) { if (deepCopy) { throw new IllegalStateException("Calling setInterruptFlag on a deep copy is not supported"); } if (!deepCopies.isEmpty()) { throw new IllegalStateException( "Setting interrupt flag after calling deep copy not supported"); } setInterruptFlagInternal(flag); } private void setInterruptFlagInternal(AtomicBoolean flag) { this.interruptFlag = flag; for (LocalityGroupReader lgr : currentReaders) { lgr.setInterruptFlag(interruptFlag); } } @Override public void setCacheProvider(CacheProvider cacheProvider) { 
reader.setCacheProvider(cacheProvider); } @Override public void reset() { clear(); } } public interface RFileSKVIterator extends FileSKVIterator { FileSKVIterator getIndex() throws IOException; void reset(); } static abstract class FencedFileSKVIterator implements FileSKVIterator { private final FileSKVIterator reader; protected final Range fence; private final Key fencedStartKey; private final Supplier<Key> fencedEndKey; public FencedFileSKVIterator(FileSKVIterator reader, Range fence) { this.reader = Objects.requireNonNull(reader); this.fence = Objects.requireNonNull(fence); this.fencedStartKey = fence.getStartKey(); this.fencedEndKey = Suppliers.memoize(() -> getEndKey(fence.getEndKey())); } @Override public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException { throw new UnsupportedOperationException(); } @Override public boolean hasTop() { return reader.hasTop(); } @Override public void next() throws IOException { reader.next(); } @Override public Key getTopKey() { return reader.getTopKey(); } @Override public Value getTopValue() { return reader.getTopValue(); } @Override public Text getFirstRow() throws IOException { var row = reader.getFirstRow(); if (row != null && fence.beforeStartKey(new Key(row))) { return fencedStartKey.getRow(); } else { return row; } } @Override public Text getLastRow() throws IOException { var row = reader.getLastRow(); if (row != null && fence.afterEndKey(new Key(row))) { return fencedEndKey.get().getRow(); } else { return row; } } @Override public boolean isRunningLowOnMemory() { return reader.isRunningLowOnMemory(); } @Override public void setInterruptFlag(AtomicBoolean flag) { reader.setInterruptFlag(flag); } @Override public DataInputStream getMetaStore(String name) throws IOException { return reader.getMetaStore(name); } @Override public void closeDeepCopies() throws IOException { reader.closeDeepCopies(); } @Override public void 
setCacheProvider(CacheProvider cacheProvider) { reader.setCacheProvider(cacheProvider); } @Override public void close() throws IOException { reader.close(); } private Key getEndKey(Key key) { // If they key is infinite it will be null or if inclusive we can just use it as is // as it would be the correct value for getLastKey() if (fence.isInfiniteStopKey() || fence.isEndKeyInclusive()) { return key; } // If exclusive we need to strip the last byte to get the last key that is part of the // actual range to return final byte[] ba = key.getRow().getBytes(); Preconditions.checkArgument(ba.length > 0 && ba[ba.length - 1] == (byte) 0x00); byte[] fba = new byte[ba.length - 1]; System.arraycopy(ba, 0, fba, 0, ba.length - 1); return new Key(fba); } } static class FencedIndex extends FencedFileSKVIterator { private final FileSKVIterator source; public FencedIndex(FileSKVIterator source, Range seekFence) { super(source, seekFence); this.source = source; } @Override public boolean hasTop() { // this code filters out data because the rfile index iterators do not support seek // If startKey is set then discard everything until we reach the start // of the range if (fence.getStartKey() != null) { while (source.hasTop() && fence.beforeStartKey(source.getTopKey())) { try { source.next(); } catch (IOException e) { throw new UncheckedIOException(e); } } } // If endKey is set then ensure that the current key is not passed the end of the range return source.hasTop() && !fence.afterEndKey(source.getTopKey()); } @Override public FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig) { throw new UnsupportedOperationException(); } @Override public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException { throw new UnsupportedOperationException(); } @Override public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) { throw new UnsupportedOperationException(); } } static class FencedReader extends FencedFileSKVIterator 
implements RFileSKVIterator { private final Reader reader; public FencedReader(Reader reader, Range seekFence) { super(reader, seekFence); this.reader = reader; } @Override public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException { reader.reset(); if (fence != null) { range = fence.clip(range, true); if (range == null) { return; } } reader.seek(range, columnFamilies, inclusive); } @Override public FencedReader deepCopy(IteratorEnvironment env) { return new FencedReader(reader.deepCopy(env), fence); } @Override public FileSKVIterator getIndex() throws IOException { return new FencedIndex(reader.getIndex(), fence); } @Override public FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig) { final Reader sample = reader.getSample(sampleConfig); return sample != null ? new FencedReader(sample, fence) : null; } @Override public void reset() { reader.reset(); } } public static RFileSKVIterator getReader(final CachableBuilder cb, final TabletFile dataFile) throws IOException { final RFile.Reader reader = new RFile.Reader(Objects.requireNonNull(cb)); return dataFile.hasRange() ? new FencedReader(reader, dataFile.getRange()) : reader; } public static RFileSKVIterator getReader(final CachableBuilder cb, Range range) throws IOException { final RFile.Reader reader = new RFile.Reader(Objects.requireNonNull(cb)); return !range.isInfiniteStartKey() || !range.isInfiniteStopKey() ? new FencedReader(reader, range) : reader; } }
9,654
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/RelativeKey.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.function.Supplier;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.util.MutableByteSequence;
import org.apache.accumulo.core.util.UnsynchronizedBuffer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

/**
 * A Key encoded relative to the previous key in the stream. Each field (row, column family,
 * qualifier, visibility, timestamp) is either marked identical to the previous key's field,
 * written as a common prefix plus a suffix, or written in full. Which form was used is
 * recorded in one or two leading flag bytes (see the bit constants below).
 */
public class RelativeKey implements Writable {

  private static final byte BIT = 0x01;

  private Key key;
  private Key prevKey;

  // flag bytes describing how each field of `key` is encoded relative to `prevKey`
  private byte fieldsSame;
  private byte fieldsPrefixed;

  // Exact match compression options (first byte) and flag for further
  private static final byte ROW_SAME = BIT << 0;
  private static final byte CF_SAME = BIT << 1;
  private static final byte CQ_SAME = BIT << 2;
  private static final byte CV_SAME = BIT << 3;
  private static final byte TS_SAME = BIT << 4;
  private static final byte DELETED = BIT << 5;
  // private static final byte UNUSED_1_6 = BIT << 6;
  // when set, a second flag byte (fieldsPrefixed) follows the first on the wire
  private static final byte PREFIX_COMPRESSION_ENABLED = (byte) (BIT << 7);

  // Prefix compression (second byte)
  private static final byte ROW_COMMON_PREFIX = BIT << 0;
  private static final byte CF_COMMON_PREFIX = BIT << 1;
  private static final byte CQ_COMMON_PREFIX = BIT << 2;
  private static final byte CV_COMMON_PREFIX = BIT << 3;
  private static final byte TS_DIFF = BIT << 4;
  // private static final byte UNUSED_2_5 = BIT << 5;
  // private static final byte UNUSED_2_6 = BIT << 6;
  // private static final byte UNUSED_2_7 = (byte) (BIT << 7);

  // Values for prefix compression
  int rowCommonPrefixLen;
  int cfCommonPrefixLen;
  int cqCommonPrefixLen;
  int cvCommonPrefixLen;
  long tsDiff;

  /**
   * This constructor is used when one needs to read from an input stream
   */
  public RelativeKey() {

  }

  /**
   * This constructor is used when constructing a key for writing to an output stream
   */
  public RelativeKey(Key prevKey, Key key) {

    this.key = key;

    fieldsSame = 0;
    fieldsPrefixed = 0;

    if (prevKey != null) {

      // compare each field against the previous key, recording same/prefixed flags
      ByteSequence prevKeyScratch = prevKey.getRowData();
      ByteSequence keyScratch = key.getRowData();
      rowCommonPrefixLen =
          getCommonPrefixLen(prevKeyScratch, keyScratch, ROW_SAME, ROW_COMMON_PREFIX);

      prevKeyScratch = prevKey.getColumnFamilyData();
      keyScratch = key.getColumnFamilyData();
      cfCommonPrefixLen =
          getCommonPrefixLen(prevKeyScratch, keyScratch, CF_SAME, CF_COMMON_PREFIX);

      prevKeyScratch = prevKey.getColumnQualifierData();
      keyScratch = key.getColumnQualifierData();
      cqCommonPrefixLen =
          getCommonPrefixLen(prevKeyScratch, keyScratch, CQ_SAME, CQ_COMMON_PREFIX);

      prevKeyScratch = prevKey.getColumnVisibilityData();
      keyScratch = key.getColumnVisibilityData();
      cvCommonPrefixLen =
          getCommonPrefixLen(prevKeyScratch, keyScratch, CV_SAME, CV_COMMON_PREFIX);

      // timestamps are delta-encoded rather than prefix-encoded
      tsDiff = key.getTimestamp() - prevKey.getTimestamp();

      if (tsDiff == 0) {
        fieldsSame |= TS_SAME;
      } else {
        fieldsPrefixed |= TS_DIFF;
      }

      // only emit the second flag byte when at least one field uses it
      fieldsSame |= fieldsPrefixed == 0 ? 0 : PREFIX_COMPRESSION_ENABLED;
    }

    // stored deleted information in bit vector instead of its own byte
    if (key.isDeleted()) {
      fieldsSame |= DELETED;
    }
  }

  // computes the common prefix of one field and updates the flag bytes:
  // exact match -> fieldBit; prefix longer than 1 byte -> commonPrefix bit
  // (a 1-byte prefix is not worth the two extra varint lengths)
  private int getCommonPrefixLen(ByteSequence prevKeyScratch, ByteSequence keyScratch,
      byte fieldBit, byte commonPrefix) {
    int commonPrefixLen = getCommonPrefix(prevKeyScratch, keyScratch);
    if (commonPrefixLen == -1) {
      fieldsSame |= fieldBit;
    } else if (commonPrefixLen > 1) {
      fieldsPrefixed |= commonPrefix;
    }
    return commonPrefixLen;
  }

  /**
   *
   * @return -1 (exact match) or the number of bytes in common
   */
  static int getCommonPrefix(ByteSequence prev, ByteSequence cur) {
    if (prev == cur) {
      return -1; // infinite... exact match
    }

    int prevLen = prev.length();
    int curLen = cur.length();
    int maxChecks = Math.min(prevLen, curLen);
    int common = 0;
    while (common < maxChecks) {
      int a = prev.byteAt(common) & 0xff;
      int b = cur.byteAt(common) & 0xff;
      if (a != b) {
        return common;
      }
      common++;
    }
    // no differences found
    // either exact or matches the part checked, so if they are the same length, they are an exact
    // match,
    // and if not, then they have a common prefix over all the checks we've done
    return prevLen == curLen ? -1 : maxChecks;
  }

  /** Sets the previous key used to resolve SAME/PREFIX fields in {@link #readFields}. */
  public void setPrevKey(Key pk) {
    this.prevKey = pk;
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    fieldsSame = in.readByte();
    if ((fieldsSame & PREFIX_COMPRESSION_ENABLED) == PREFIX_COMPRESSION_ENABLED) {
      fieldsPrefixed = in.readByte();
    } else {
      fieldsPrefixed = 0;
    }

    final byte[] row, cf, cq, cv;
    final long ts;

    row = getData(in, ROW_SAME, ROW_COMMON_PREFIX, () -> prevKey.getRowData());
    cf = getData(in, CF_SAME, CF_COMMON_PREFIX, () -> prevKey.getColumnFamilyData());
    cq = getData(in, CQ_SAME, CQ_COMMON_PREFIX, () -> prevKey.getColumnQualifierData());
    cv = getData(in, CV_SAME, CV_COMMON_PREFIX, () -> prevKey.getColumnVisibilityData());

    if ((fieldsSame & TS_SAME) == TS_SAME) {
      ts = prevKey.getTimestamp();
    } else if ((fieldsPrefixed & TS_DIFF) == TS_DIFF) {
      // delta-encoded timestamp: stored value is the difference from the previous key
      ts = WritableUtils.readVLong(in) + prevKey.getTimestamp();
    } else {
      ts = WritableUtils.readVLong(in);
    }

    this.key = new Key(row, cf, cq, cv, ts, (fieldsSame & DELETED) == DELETED, false);
    // the key just decoded becomes the reference for the next readFields call
    this.prevKey = this.key;
  }

  // decodes one field: copy from prevKey (SAME), prefix + suffix (COMMON_PREFIX),
  // or full bytes from the stream
  private byte[] getData(DataInput in, byte fieldBit, byte commonPrefix,
      Supplier<ByteSequence> data) throws IOException {
    if ((fieldsSame & fieldBit) == fieldBit) {
      return data.get().toArray();
    } else if ((fieldsPrefixed & commonPrefix) == commonPrefix) {
      return readPrefix(in, data.get());
    } else {
      return read(in);
    }
  }

  /** Result of {@link #fastSkip}: the key stopped at, entries consumed, and the key before it. */
  public static class SkippR {
    RelativeKey rk;
    int skipped;
    Key prevKey;

    SkippR(RelativeKey rk, int skipped, Key prevKey) {
      this.rk = rk;
      this.skipped = skipped;
      this.prevKey = prevKey;
    }
  }

  /**
   * Decodes and discards entries until reaching the first key {@code >= seekKey} (compared on
   * row, family, and qualifier only) or until {@code entriesLeft} entries are consumed.
   * Avoids Key object allocation by decoding into reusable buffers and only comparing fields
   * that changed.
   *
   * @param currKey the already-decoded current key, or null if starting at a block boundary
   * @param value receives the value of the last entry consumed
   */
  public static SkippR fastSkip(DataInput in, Key seekKey, MutableByteSequence value, Key prevKey,
      Key currKey, int entriesLeft) throws IOException {
    // this method mostly avoids object allocation and only does compares when the row changes

    MutableByteSequence row, cf, cq, cv;
    MutableByteSequence prow, pcf, pcq, pcv;

    ByteSequence stopRow = seekKey.getRowData();
    ByteSequence stopCF = seekKey.getColumnFamilyData();
    ByteSequence stopCQ = seekKey.getColumnQualifierData();

    long ts = -1;
    long pts = -1;
    boolean pdel = false;

    int rowCmp = -1, cfCmp = -1, cqCmp = -1;

    if (currKey != null) {

      prow = new MutableByteSequence(currKey.getRowData());
      pcf = new MutableByteSequence(currKey.getColumnFamilyData());
      pcq = new MutableByteSequence(currKey.getColumnQualifierData());
      pcv = new MutableByteSequence(currKey.getColumnVisibilityData());
      pts = currKey.getTimestamp();

      row = new MutableByteSequence(currKey.getRowData());
      cf = new MutableByteSequence(currKey.getColumnFamilyData());
      cq = new MutableByteSequence(currKey.getColumnQualifierData());
      cv = new MutableByteSequence(currKey.getColumnVisibilityData());
      ts = currKey.getTimestamp();

      rowCmp = row.compareTo(stopRow);
      cfCmp = cf.compareTo(stopCF);
      cqCmp = cq.compareTo(stopCQ);

      // if the current key is already at or past the seek key, return without consuming input
      if (rowCmp >= 0) {
        if (rowCmp > 0) {
          RelativeKey rk = new RelativeKey();
          rk.key = rk.prevKey = new Key(currKey);

          return new SkippR(rk, 0, prevKey);
        }

        if (cfCmp >= 0) {
          if (cfCmp > 0) {
            RelativeKey rk = new RelativeKey();
            rk.key = rk.prevKey = new Key(currKey);

            return new SkippR(rk, 0, prevKey);
          }

          if (cqCmp >= 0) {
            RelativeKey rk = new RelativeKey();
            rk.key = rk.prevKey = new Key(currKey);

            return new SkippR(rk, 0, prevKey);
          }
        }
      }

    } else {
      row = new MutableByteSequence(new byte[64], 0, 0);
      cf = new MutableByteSequence(new byte[64], 0, 0);
      cq = new MutableByteSequence(new byte[64], 0, 0);
      cv = new MutableByteSequence(new byte[64], 0, 0);

      prow = new MutableByteSequence(new byte[64], 0, 0);
      pcf = new MutableByteSequence(new byte[64], 0, 0);
      pcq = new MutableByteSequence(new byte[64], 0, 0);
      pcv = new MutableByteSequence(new byte[64], 0, 0);
    }

    byte fieldsSame = -1;
    byte fieldsPrefixed = 0;
    int count = 0;
    Key newPrevKey = null;

    while (count < entriesLeft) {

      pdel = (fieldsSame & DELETED) == DELETED;

      fieldsSame = in.readByte();
      if ((fieldsSame & PREFIX_COMPRESSION_ENABLED) == PREFIX_COMPRESSION_ENABLED) {
        fieldsPrefixed = in.readByte();
      } else {
        fieldsPrefixed = 0;
      }

      boolean changed = false;

      // for each field: swap the current buffer into the "previous" slot, then decode
      // the new value on top of the freed buffer; only recompare fields that changed
      if ((fieldsSame & ROW_SAME) != ROW_SAME) {

        MutableByteSequence tmp = prow;
        prow = row;
        row = tmp;

        if ((fieldsPrefixed & ROW_COMMON_PREFIX) == ROW_COMMON_PREFIX) {
          readPrefix(in, row, prow);
        } else {
          read(in, row);
        }

        // read a new row, so need to compare...
        rowCmp = row.compareTo(stopRow);
        changed = true;
      } // else the row is the same as the last, so no need to compare

      if ((fieldsSame & CF_SAME) != CF_SAME) {

        MutableByteSequence tmp = pcf;
        pcf = cf;
        cf = tmp;

        if ((fieldsPrefixed & CF_COMMON_PREFIX) == CF_COMMON_PREFIX) {
          readPrefix(in, cf, pcf);
        } else {
          read(in, cf);
        }

        cfCmp = cf.compareTo(stopCF);
        changed = true;
      }

      if ((fieldsSame & CQ_SAME) != CQ_SAME) {

        MutableByteSequence tmp = pcq;
        pcq = cq;
        cq = tmp;

        if ((fieldsPrefixed & CQ_COMMON_PREFIX) == CQ_COMMON_PREFIX) {
          readPrefix(in, cq, pcq);
        } else {
          read(in, cq);
        }

        cqCmp = cq.compareTo(stopCQ);
        changed = true;
      }

      if ((fieldsSame & CV_SAME) != CV_SAME) {

        MutableByteSequence tmp = pcv;
        pcv = cv;
        cv = tmp;

        if ((fieldsPrefixed & CV_COMMON_PREFIX) == CV_COMMON_PREFIX) {
          readPrefix(in, cv, pcv);
        } else {
          read(in, cv);
        }
      }

      if ((fieldsSame & TS_SAME) != TS_SAME) {
        pts = ts;

        if ((fieldsPrefixed & TS_DIFF) == TS_DIFF) {
          ts = WritableUtils.readVLong(in) + pts;
        } else {
          ts = WritableUtils.readVLong(in);
        }
      }

      readValue(in, value);

      count++;

      // stop once the decoded key is at or past the seek key
      if (changed && rowCmp >= 0) {
        if (rowCmp > 0) {
          break;
        }

        if (cfCmp >= 0) {
          if (cfCmp > 0) {
            break;
          }

          if (cqCmp >= 0) {
            break;
          }
        }
      }

    }

    if (count > 1) {
      MutableByteSequence trow, tcf, tcq, tcv;
      long tts;

      // when the current keys field is same as the last, then
      // set the prev keys field the same as the current key
      trow = (fieldsSame & ROW_SAME) == ROW_SAME ? row : prow;
      tcf = (fieldsSame & CF_SAME) == CF_SAME ? cf : pcf;
      tcq = (fieldsSame & CQ_SAME) == CQ_SAME ? cq : pcq;
      tcv = (fieldsSame & CV_SAME) == CV_SAME ? cv : pcv;
      tts = (fieldsSame & TS_SAME) == TS_SAME ? ts : pts;

      newPrevKey = new Key(trow.getBackingArray(), trow.offset(), trow.length(),
          tcf.getBackingArray(), tcf.offset(), tcf.length(), tcq.getBackingArray(), tcq.offset(),
          tcq.length(), tcv.getBackingArray(), tcv.offset(), tcv.length(), tts);
      newPrevKey.setDeleted(pdel);
    } else if (count == 1) {
      if (currKey != null) {
        newPrevKey = currKey;
      } else {
        newPrevKey = prevKey;
      }
    } else {
      throw new IllegalStateException();
    }

    RelativeKey result = new RelativeKey();
    result.key = new Key(row.getBackingArray(), row.offset(), row.length(), cf.getBackingArray(),
        cf.offset(), cf.length(), cq.getBackingArray(), cq.offset(), cq.length(),
        cv.getBackingArray(), cv.offset(), cv.length(), ts);
    result.key.setDeleted((fieldsSame & DELETED) != 0);
    result.prevKey = result.key;

    return new SkippR(result, count, newPrevKey);
  }

  // reads a vint length followed by that many bytes into the reusable buffer
  private static void read(DataInput in, MutableByteSequence mbseq) throws IOException {
    int len = WritableUtils.readVInt(in);
    read(in, mbseq, len);
  }

  // values use a fixed 4-byte length, unlike key fields which use vints
  private static void readValue(DataInput in, MutableByteSequence mbseq) throws IOException {
    int len = in.readInt();
    read(in, mbseq, len);
  }

  private static void read(DataInput in, MutableByteSequence mbseqDestination, int len)
      throws IOException {
    // grow the backing array only when needed; sized geometrically to amortize growth
    if (mbseqDestination.getBackingArray().length < len) {
      mbseqDestination.setArray(new byte[UnsynchronizedBuffer.nextArraySize(len)], 0, 0);
    }

    in.readFully(mbseqDestination.getBackingArray(), 0, len);
    mbseqDestination.setLength(len);
  }

  // decodes a prefix-compressed field into a fresh array:
  // vint prefixLen, vint remainingLen, then remainingLen bytes; prefix comes from prefixSource
  private static byte[] readPrefix(DataInput in, ByteSequence prefixSource) throws IOException {
    int prefixLen = WritableUtils.readVInt(in);
    int remainingLen = WritableUtils.readVInt(in);
    byte[] data = new byte[prefixLen + remainingLen];
    if (prefixSource.isBackedByArray()) {
      System.arraycopy(prefixSource.getBackingArray(), prefixSource.offset(), data, 0, prefixLen);
    } else {
      byte[] prefixArray = prefixSource.toArray();
      System.arraycopy(prefixArray, 0, data, 0, prefixLen);
    }
    // read remaining
    in.readFully(data, prefixLen, remainingLen);
    return data;
  }

  // same wire format as readPrefix above, but decodes into a reusable buffer
  private static void readPrefix(DataInput in, MutableByteSequence dest,
      ByteSequence prefixSource) throws IOException {
    int prefixLen = WritableUtils.readVInt(in);
    int remainingLen = WritableUtils.readVInt(in);
    int len = prefixLen + remainingLen;
    if (dest.getBackingArray().length < len) {
      dest.setArray(new byte[UnsynchronizedBuffer.nextArraySize(len)], 0, 0);
    }
    if (prefixSource.isBackedByArray()) {
      System.arraycopy(prefixSource.getBackingArray(), prefixSource.offset(),
          dest.getBackingArray(), 0, prefixLen);
    } else {
      byte[] prefixArray = prefixSource.toArray();
      System.arraycopy(prefixArray, 0, dest.getBackingArray(), 0, prefixLen);
    }
    // read remaining
    in.readFully(dest.getBackingArray(), prefixLen, remainingLen);
    dest.setLength(len);
  }

  // reads a vint length followed by that many bytes into a fresh array
  private static byte[] read(DataInput in) throws IOException {
    int len = WritableUtils.readVInt(in);
    byte[] data = new byte[len];
    in.readFully(data);
    return data;
  }

  public Key getKey() {
    return key;
  }

  // writes a vint length followed by the field's bytes
  private static void write(DataOutput out, ByteSequence bs) throws IOException {
    WritableUtils.writeVInt(out, bs.length());
    out.write(bs.getBackingArray(), bs.offset(), bs.length());
  }

  // writes vint prefix length, vint suffix length, then only the suffix bytes
  private static void writePrefix(DataOutput out, ByteSequence bs, int commonPrefixLength)
      throws IOException {
    WritableUtils.writeVInt(out, commonPrefixLength);
    WritableUtils.writeVInt(out, bs.length() - commonPrefixLength);
    out.write(bs.getBackingArray(), bs.offset() + commonPrefixLength,
        bs.length() - commonPrefixLength);
  }

  @Override
  public void write(DataOutput out) throws IOException {

    out.writeByte(fieldsSame);

    if ((fieldsSame & PREFIX_COMPRESSION_ENABLED) == PREFIX_COMPRESSION_ENABLED) {
      out.write(fieldsPrefixed);
    }

    if ((fieldsSame & ROW_SAME) == ROW_SAME) {
      // same, write nothing
    } else if ((fieldsPrefixed & ROW_COMMON_PREFIX) == ROW_COMMON_PREFIX) {
      // similar, write what's common
      writePrefix(out, key.getRowData(), rowCommonPrefixLen);
    } else {
      // write it all
      write(out, key.getRowData());
    }

    if ((fieldsSame & CF_SAME) == CF_SAME) {
      // same, write nothing
    } else if ((fieldsPrefixed & CF_COMMON_PREFIX) == CF_COMMON_PREFIX) {
      // similar, write what's common
      writePrefix(out, key.getColumnFamilyData(), cfCommonPrefixLen);
    } else {
      // write it all
      write(out, key.getColumnFamilyData());
    }

    if ((fieldsSame & CQ_SAME) == CQ_SAME) {
      // same, write nothing
    } else if ((fieldsPrefixed & CQ_COMMON_PREFIX) == CQ_COMMON_PREFIX) {
      // similar, write what's common
      writePrefix(out, key.getColumnQualifierData(), cqCommonPrefixLen);
    } else {
      // write it all
      write(out, key.getColumnQualifierData());
    }

    if ((fieldsSame & CV_SAME) == CV_SAME) {
      // same, write nothing
    } else if ((fieldsPrefixed & CV_COMMON_PREFIX) == CV_COMMON_PREFIX) {
      // similar, write what's common
      writePrefix(out, key.getColumnVisibilityData(), cvCommonPrefixLen);
    } else {
      // write it all
      write(out, key.getColumnVisibilityData());
    }

    if ((fieldsSame & TS_SAME) == TS_SAME) {
      // same, write nothing
    } else if ((fieldsPrefixed & TS_DIFF) == TS_DIFF) {
      // similar, write what's common
      WritableUtils.writeVLong(out, tsDiff);
    } else {
      // write it all
      WritableUtils.writeVLong(out, key.getTimestamp());
    }
  }
}
9,655
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/SplitLarge.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import java.util.ArrayList; import java.util.List; import org.apache.accumulo.core.cli.ConfigOpts; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.crypto.CryptoFactoryLoader; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder; import org.apache.accumulo.core.file.rfile.RFile.Reader; import org.apache.accumulo.core.file.rfile.RFile.Writer; import org.apache.accumulo.core.file.rfile.bcfile.BCFile; import org.apache.accumulo.core.spi.crypto.CryptoService; import org.apache.accumulo.start.spi.KeywordExecutable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import com.beust.jcommander.Parameter; import com.google.auto.service.AutoService; /** * Split an RFile into large and small key/value files. 
*/ @AutoService(KeywordExecutable.class) public class SplitLarge implements KeywordExecutable { static class Opts extends ConfigOpts { @Parameter(names = "-m", description = "the maximum size of the key/value pair to shunt to the small file") long maxSize = 10 * 1024 * 1024; @Parameter(description = "<file.rf> { <file.rf> ... }") List<String> files = new ArrayList<>(); } public static void main(String[] args) throws Exception { new SplitLarge().execute(args); } @Override public String keyword() { return "split-large"; } @Override public String description() { return "Splits an RFile into large and small key/value files"; } @Override public void execute(String[] args) throws Exception { Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); Opts opts = new Opts(); opts.parseArgs("accumulo split-large", args); for (String file : opts.files) { AccumuloConfiguration aconf = opts.getSiteConfiguration(); CryptoService cs = CryptoFactoryLoader.getServiceForServer(aconf); Path path = new Path(file); CachableBuilder cb = new CachableBuilder().fsPath(fs, path).conf(conf).cryptoService(cs); try (Reader iter = new RFile.Reader(cb)) { if (!file.endsWith(".rf")) { throw new IllegalArgumentException("File must end with .rf"); } String smallName = file.substring(0, file.length() - 3) + "_small.rf"; String largeName = file.substring(0, file.length() - 3) + "_large.rf"; int blockSize = (int) aconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE); try ( Writer small = new RFile.Writer( new BCFile.Writer(fs.create(new Path(smallName)), null, "gz", conf, cs), blockSize); Writer large = new RFile.Writer( new BCFile.Writer(fs.create(new Path(largeName)), null, "gz", conf, cs), blockSize)) { small.startDefaultLocalityGroup(); large.startDefaultLocalityGroup(); iter.seek(new Range(), new ArrayList<>(), false); while (iter.hasTop()) { Key key = iter.getTopKey(); Value value = iter.getTopValue(); if (key.getSize() + value.getSize() < opts.maxSize) { small.append(key, 
value); } else { large.append(key, value); } iter.next(); } } } } } }
9,656
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/RollingStats.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import org.apache.commons.math3.stat.StatUtils; import org.apache.commons.math3.util.FastMath; /** * This class supports efficient window statistics. Apache commons math3 has a class called * DescriptiveStatistics that supports windows. DescriptiveStatistics recomputes the statistics over * the entire window each time its requested. In a test over 1,000,000 entries with a window size of * 1019 that requested stats for each entry this class took ~50ms and DescriptiveStatistics took * ~6,000ms. * * <p> * This class may not be as accurate as DescriptiveStatistics. In unit test its within 1/1000 of * DescriptiveStatistics. 
*/ class RollingStats { private int position; private double[] window; private double average; private double variance; private double stddev; // indicates if the window is full private boolean windowFull; private int recomputeCounter = 0; RollingStats(int windowSize) { this.windowFull = false; this.position = 0; this.window = new double[windowSize]; } /** * @see <a href= * "https://jonisalonen.com/2014/efficient-and-accurate-rolling-standard-deviation">Efficient * and accurate rolling standard deviation</a> */ private void update(double newValue, double oldValue, int windowSize) { double delta = newValue - oldValue; double oldAverage = average; average = average + delta / windowSize; variance += delta * (newValue - average + oldValue - oldAverage) / (windowSize - 1); stddev = FastMath.sqrt(variance); } void addValue(long stat) { double old = window[position]; window[position] = stat; position++; recomputeCounter++; if (windowFull) { update(stat, old, window.length); } else if (position == window.length) { computeStats(window.length); windowFull = true; } if (position == window.length) { position = 0; } } private void computeStats(int len) { average = StatUtils.mean(window, 0, len); variance = StatUtils.variance(window, average, 0, len); stddev = FastMath.sqrt(variance); recomputeCounter = 0; } private void computeStats() { if (windowFull) { if (variance < 0 || recomputeCounter >= 100) { // incremental computation drifts over time, so periodically force a recompute computeStats(window.length); } } else if (recomputeCounter > 0) { computeStats(position); } } double getMean() { computeStats(); return average; } double getVariance() { computeStats(); return variance; } double getStandardDeviation() { computeStats(); return stddev; } boolean isWindowFull() { return windowFull; } }
9,657
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/KeyShortener.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import org.apache.accumulo.core.data.ArrayByteSequence; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.Key; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.primitives.Bytes; /** * Code to shorten keys that will be placed into RFile indexes. This code attempts to find a key * that's between two keys that shorter. 
 */
public class KeyShortener {

  private static final byte[] EMPTY = new byte[0];
  // single-byte sequences used to build successors
  private static final byte[] B00 = {(byte) 0x00};
  private static final byte[] BFF = {(byte) 0xff};

  private static final Logger log = LoggerFactory.getLogger(KeyShortener.class);

  // utility class; never instantiated
  private KeyShortener() {}

  /**
   * Returns the index of the first byte at or after {@code start} that is not 0xff, or
   * {@code bs.length()} if all remaining bytes are 0xff.
   */
  private static int findNonFF(ByteSequence bs, int start) {
    for (int i = start; i < bs.length(); i++) {
      if (bs.byteAt(i) != (byte) 0xff) {
        return i;
      }
    }

    return bs.length();
  }

  /*
   * return S such that prev < S < current or null if no such sequence
   */
  public static ByteSequence shorten(ByteSequence prev, ByteSequence current) {
    int minLen = Math.min(prev.length(), current.length());

    for (int i = 0; i < minLen; i++) {
      // bytes are compared as unsigned values
      int pb = 0xff & prev.byteAt(i);
      int cb = 0xff & current.byteAt(i);

      int diff = cb - pb;

      if (diff == 1) {
        // bytes differ by exactly one: cannot simply increment prev's byte (that would equal
        // current's byte), so extend prev past any run of trailing 0xff bytes
        int newLen = findNonFF(prev, i + 1);
        byte[] successor;
        if (newLen < prev.length()) {
          successor = Bytes.concat(prev.subSequence(0, newLen).toArray(), BFF);
        } else {
          successor = Bytes.concat(prev.subSequence(0, newLen).toArray(), B00);
        }
        return new ArrayByteSequence(successor);
      } else if (diff > 1) {
        // bytes differ by more than one: truncate prev after this byte and increment it, which
        // yields a sequence strictly between prev and current
        byte[] copy = new byte[i + 1];
        System.arraycopy(prev.subSequence(0, i + 1).toArray(), 0, copy, 0, i + 1);
        copy[i] = (byte) ((0xff & copy[i]) + 1);
        return new ArrayByteSequence(copy);
      }
    }

    // prev is a prefix of (or equal to) current; the immediate successor is prev + 0x00
    ArrayByteSequence successor = new ArrayByteSequence(Bytes.concat(prev.toArray(), B00));
    if (successor.equals(current)) {
      return null;
    }

    return successor;
  }

  /*
   * This entire class supports an optional optimization. This code does a sanity check to ensure
   * the optimization code did what was intended, doing a noop if there is a bug.
   */
  @VisibleForTesting
  static Key sanityCheck(Key prev, Key current, Key shortened) {
    if (prev.compareTo(shortened) >= 0) {
      log.warn("Bug in key shortening code, please open an issue " + prev + " >= " + shortened);
      return prev;
    }

    if (current.compareTo(shortened) <= 0) {
      log.warn("Bug in key shortening code, please open an issue " + current + " <= " + shortened);
      return prev;
    }

    return shortened;
  }

  /*
   * Find a key K where prev < K < current AND K is shorter. If can not find a K that meets
   * criteria, then returns prev.
   */
  public static Key shorten(Key prev, Key current) {
    Preconditions.checkArgument(prev.compareTo(current) <= 0,
        "Expected key less than or equal. " + prev + " > " + current);

    // shorten the first field (row, then family, then qualifier) where the two keys differ;
    // later fields of the shortened key are left empty, which sorts before any value
    if (prev.getRowData().compareTo(current.getRowData()) < 0) {
      ByteSequence shortenedRow = shorten(prev.getRowData(), current.getRowData());
      if (shortenedRow == null) {
        return prev;
      }
      return sanityCheck(prev, current, new Key(shortenedRow.toArray(), EMPTY, EMPTY, EMPTY, 0));
    } else if (prev.getColumnFamilyData().compareTo(current.getColumnFamilyData()) < 0) {
      ByteSequence shortenedFam =
          shorten(prev.getColumnFamilyData(), current.getColumnFamilyData());
      if (shortenedFam == null) {
        return prev;
      }
      return sanityCheck(prev, current,
          new Key(prev.getRowData().toArray(), shortenedFam.toArray(), EMPTY, EMPTY, 0));
    } else if (prev.getColumnQualifierData().compareTo(current.getColumnQualifierData()) < 0) {
      ByteSequence shortenedQual =
          shorten(prev.getColumnQualifierData(), current.getColumnQualifierData());
      if (shortenedQual == null) {
        return prev;
      }
      return sanityCheck(prev, current, new Key(prev.getRowData().toArray(),
          prev.getColumnFamilyData().toArray(), shortenedQual.toArray(), EMPTY, 0));
    } else {
      return prev;
    }
  }
}
9,658
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/VisMetricsGatherer.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import static java.nio.charset.StandardCharsets.UTF_8; import java.io.PrintStream; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import org.apache.accumulo.core.data.ArrayByteSequence; import org.apache.accumulo.core.data.ByteSequence; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Value; import org.apache.hadoop.io.Text; import com.google.common.util.concurrent.AtomicLongMap; /** * This class provides visibility metrics per locality group. The Map in getMetrics() maps the * locality group name to an ArrayList of VisibilityMetric objects. These contain the components of * a visibility metric; the visibility as a String, the number of times that is seen in a locality * group, the percentage of keys that contain that visibility in the locality group, the number of * blocks in the locality group that contain the visibility, and the percentage of blocks in the * locality group that contain the visibility. 
 */
public class VisMetricsGatherer
    implements MetricsGatherer<Map<String,ArrayList<VisibilityMetric>>> {

  private static final String KEY_HASH_ALGORITHM = "SHA-256";

  // per locality group: visibility string -> number of keys seen with it
  protected Map<String,AtomicLongMap<String>> metric;
  // per locality group: visibility string -> number of blocks containing it
  protected Map<String,AtomicLongMap<String>> blocks;
  // parallel lists indexed by locality-group position (same order as localityGroups)
  protected ArrayList<Long> numEntries;
  protected ArrayList<Integer> numBlocks;
  // visibilities already counted for the current block; cleared on startBlock()
  private ArrayList<String> inBlock;
  protected ArrayList<String> localityGroups;
  // number of locality groups started so far; the current group is numLG - 1
  private int numLG;
  // mapping provided by init(): locality group name -> its column families
  private Map<String,ArrayList<ByteSequence>> localityGroupCF;

  public VisMetricsGatherer() {
    metric = new HashMap<>();
    blocks = new HashMap<>();
    numEntries = new ArrayList<>();
    numBlocks = new ArrayList<>();
    inBlock = new ArrayList<>();
    localityGroups = new ArrayList<>();
    numLG = 0;
  }

  @Override
  public void init(Map<String,ArrayList<ByteSequence>> cf) {
    localityGroupCF = cf;
  }

  @Override
  public void startLocalityGroup(Text oneCF) {
    // resolve the locality group name by finding which group contains this column family;
    // name stays null for the default (unnamed) locality group
    String name = null;
    ByteSequence cf = new ArrayByteSequence(oneCF.toString());
    for (Entry<String,ArrayList<ByteSequence>> entry : localityGroupCF.entrySet()) {
      if (entry.getValue().contains(cf)) {
        if (entry.getKey() != null) {
          name = entry.getKey().toString();
        }
        break;
      }
    }
    localityGroups.add(name);
    metric.put(name, AtomicLongMap.create(new HashMap<>()));
    blocks.put(name, AtomicLongMap.create(new HashMap<>()));
    numLG++;
    numEntries.add((long) 0);
    numBlocks.add(0);
  }

  @Override
  public void addMetric(Key key, Value val) {
    String myMetric = key.getColumnVisibility().toString();
    String currLG = localityGroups.get(numLG - 1);
    // count this key's visibility for the current locality group
    if (metric.get(currLG).containsKey(myMetric)) {
      metric.get(currLG).getAndIncrement(myMetric);
    } else {
      metric.get(currLG).put(myMetric, 1);
    }

    numEntries.set(numLG - 1, numEntries.get(numLG - 1) + 1);

    // count the visibility at most once per block (inBlock tracks what this block has seen)
    if (!inBlock.contains(myMetric) && blocks.get(currLG).containsKey(myMetric)) {
      blocks.get(currLG).incrementAndGet(myMetric);
      inBlock.add(myMetric);
    } else if (!inBlock.contains(myMetric) && !blocks.get(currLG).containsKey(myMetric)) {
      blocks.get(currLG).put(myMetric, 1);
      inBlock.add(myMetric);
    }
  }

  @Override
  public void startBlock() {
    inBlock.clear();
    numBlocks.set(numLG - 1, numBlocks.get(numLG - 1) + 1);
  }

  @Override
  public void printMetrics(boolean hash, String metricWord, PrintStream out) {
    for (int i = 0; i < numLG; i++) {
      String lGName = localityGroups.get(i);
      out.print("Locality Group: ");
      if (lGName == null) {
        out.println("<DEFAULT>");
      } else {
        out.println(localityGroups.get(i));
      }
      out.printf("%-27s", metricWord);
      out.println("Number of keys\t Percent of keys\tNumber of blocks\tPercent of blocks");
      for (Entry<String,Long> entry : metric.get(lGName).asMap().entrySet()) {
        if (hash) {
          String encodedKey = "";
          try {
            byte[] encodedBytes = MessageDigest.getInstance(KEY_HASH_ALGORITHM)
                .digest(entry.getKey().getBytes(UTF_8));
            // NOTE(review): decoding raw digest bytes as UTF-8 can produce replacement
            // characters; presumably only an opaque redacted label is wanted here, but a hex
            // encoding would be lossless — confirm intent before changing output format
            encodedKey = new String(encodedBytes, UTF_8);
          } catch (NoSuchAlgorithmException e) {
            out.println(
                "Failed to convert key to " + KEY_HASH_ALGORITHM + " hash: " + e.getMessage());
          }
          out.printf("%-20s", encodedKey.substring(0, 8));
        } else {
          out.printf("%-20s", entry.getKey());
        }
        out.print("\t\t" + entry.getValue() + "\t\t\t");
        out.printf("%.2f", ((double) entry.getValue() / numEntries.get(i)) * 100);
        out.print("%\t\t\t");
        long blocksIn = blocks.get(lGName).get(entry.getKey());
        out.print(blocksIn + "\t\t ");
        out.printf("%.2f", ((double) blocksIn / numBlocks.get(i)) * 100);
        out.print("%");
        out.println("");
      }
      out.println("Number of keys: " + numEntries.get(i));
      out.println();
    }
  }

  @Override
  public Map<String,ArrayList<VisibilityMetric>> getMetrics() {
    Map<String,ArrayList<VisibilityMetric>> getMetrics = new HashMap<>();
    // convert the per-group counters into VisibilityMetric rows, keyed by group name
    for (int i = 0; i < numLG; i++) {
      String lGName = localityGroups.get(i);
      ArrayList<VisibilityMetric> rows = new ArrayList<>();
      for (Entry<String,Long> entry : metric.get(lGName).asMap().entrySet()) {
        long vis = entry.getValue();
        double visPer = ((double) entry.getValue() / numEntries.get(i)) * 100;
        long blocksIn = blocks.get(lGName).get(entry.getKey());
        double blocksPer = ((double) blocksIn / numBlocks.get(i)) * 100;
        rows.add(new VisibilityMetric(entry.getKey(), vis, visPer, blocksIn, blocksPer));
      }
      getMetrics.put(lGName, rows);
    }
    return getMetrics;
  }
}
9,659
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/VisibilityMetric.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; /** * Class that holds the components of a visibility metric. The String visibility, the number of * times that is seen in a locality group, the percentage of keys that contain that visibility in * the locality group, the number of blocks in the locality group that contain the visibility, and * the percentage of blocks in the locality group that contain the visibility. */ public class VisibilityMetric { private long visLG, visBlock; private double visLGPer, visBlockPer; private String visibility; public VisibilityMetric(String visibility, long visLG, double visLGPer, long visBlock, double visBlockPer) { this.visibility = visibility; this.visLG = visLG; this.visLGPer = visLGPer; this.visBlock = visBlock; this.visBlockPer = visBlockPer; } /** * @return the visibility */ public String getVisibility() { return visibility; } /** * @return the visLG */ public long getVisLG() { return visLG; } /** * @return the visBlock */ public long getVisBlock() { return visBlock; } /** * @return the visLGPer */ public double getVisLGPer() { return visLGPer; } /** * @return the visBlockPer */ public double getVisBlockPer() { return visBlockPer; } }
9,660
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile; import java.util.ArrayList; import java.util.List; import org.apache.accumulo.core.cli.Help; import org.apache.accumulo.core.conf.DefaultConfiguration; import org.apache.accumulo.core.file.FileSKVWriter; import org.apache.accumulo.core.file.rfile.bcfile.Compression; import org.apache.accumulo.core.metadata.UnreferencedTabletFile; import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory; import org.apache.accumulo.core.spi.file.rfile.compression.NoCompression; import org.apache.accumulo.start.spi.KeywordExecutable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.beust.jcommander.IParameterValidator; import com.beust.jcommander.Parameter; import com.beust.jcommander.ParameterException; import com.google.auto.service.AutoService; /** * Create an empty RFile for use in recovering from data loss where Accumulo still refers internally * to a path. 
*/ @AutoService(KeywordExecutable.class) public class CreateEmpty implements KeywordExecutable { private static final Logger log = LoggerFactory.getLogger(CreateEmpty.class); public static class NamedLikeRFile implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { if (!value.endsWith(".rf")) { throw new ParameterException("File must end with .rf and '" + value + "' does not."); } } } public static class IsSupportedCompressionAlgorithm implements IParameterValidator { @Override public void validate(String name, String value) throws ParameterException { List<String> algorithms = Compression.getSupportedAlgorithms(); if (!algorithms.contains(value)) { throw new ParameterException("Compression codec must be one of " + algorithms); } } } static class Opts extends Help { @Parameter(names = {"-c", "--codec"}, description = "the compression codec to use.", validateWith = IsSupportedCompressionAlgorithm.class) String codec = new NoCompression().getName(); @Parameter( description = " <path> { <path> ... } Each path given is a URL." 
+ " Relative paths are resolved according to the default filesystem defined in" + " your Hadoop configuration, which is usually an HDFS instance.", required = true, validateWith = NamedLikeRFile.class) List<String> files = new ArrayList<>(); } public static void main(String[] args) throws Exception { new CreateEmpty().execute(args); } @Override public String keyword() { return "create-empty"; } @Override public String description() { return "Creates an empty rfile"; } @Override public void execute(String[] args) throws Exception { Configuration conf = new Configuration(); Opts opts = new Opts(); opts.parseArgs("accumulo create-empty", args); for (String arg : opts.files) { UnreferencedTabletFile file = UnreferencedTabletFile.of(conf, new Path(arg)); log.info("Writing to file '{}'", file); FileSKVWriter writer = new RFileOperations().newWriterBuilder() .forFile(file, file.getPath().getFileSystem(conf), conf, NoCryptoServiceFactory.NONE) .withTableConfiguration(DefaultConfiguration.getInstance()).withCompression(opts.codec) .build(); writer.close(); } } }
9,661
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiIndexIterator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.DataInputStream;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.blockfile.impl.CacheProvider;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.HeapIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.hadoop.io.Text;

/**
 * Presents multiple RFile index-entry iterators as a single merged {@link FileSKVIterator} by
 * wrapping each one in an {@code IndexIterator} and feeding them to a {@link HeapIterator}.
 * Closing this iterator closes the backing {@link RFile.Reader}. Most of the
 * {@link FileSKVIterator} contract (seek, deep copy, metadata access, sampling) is intentionally
 * unsupported.
 */
class MultiIndexIterator extends HeapIterator implements FileSKVIterator {

  // reader that owns the underlying file resources; closed by close()
  private RFile.Reader source;

  MultiIndexIterator(RFile.Reader source, List<Iterator<IndexEntry>> indexes) {
    super(indexes.size());

    this.source = source;

    // one heap source per index level iterator
    for (Iterator<IndexEntry> index : indexes) {
      addSource(new IndexIterator(index));
    }
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void close() throws IOException {
    // delegates to the reader, releasing the file handles this iterator was built from
    source.close();
  }

  @Override
  public void closeDeepCopies() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public Text getFirstRow() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public Text getLastRow() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public DataInputStream getMetaStore(String name) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    throw new UnsupportedOperationException();
  }

  @Override
  public FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void setCacheProvider(CacheProvider cacheProvider) {
    // delegates cache configuration to the backing reader
    source.setCacheProvider(cacheProvider);
  }
}
9,662
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockIndex.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize;
import org.apache.accumulo.core.file.blockfile.cache.impl.SizeConstants;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachedBlockRead;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
import org.apache.accumulo.core.spi.cache.CacheEntry.Weighable;

/**
 * A lazily-built, in-memory index over the keys inside a single cached RFile data block. The index
 * is only constructed (and later refined) for blocks that are accessed repeatedly: each call to
 * {@link #getIndex(CachedBlockRead, IndexEntry)} bumps an access counter, and the index is
 * (re)built whenever that counter reaches a power of two, doubling the number of index entries
 * each time. This amortizes the cost of indexing so that rarely-read blocks pay nothing.
 *
 * <p>
 * Implements {@link Weighable} so the block cache can account for the memory this index consumes.
 */
public class BlockIndex implements Weighable {

  // Instances are only created through the cache via the BlockIndex::new method reference below.
  private BlockIndex() {}

  /**
   * Returns the {@link BlockIndex} associated with the given cached block, building or growing it
   * when the block's access count reaches a power of two. Returns null if no index has been built
   * yet (i.e. the block has not been read often enough, or the entry interval is too small to be
   * worth indexing).
   */
  public static BlockIndex getIndex(CachedBlockRead cacheBlock, IndexEntry indexEntry)
      throws IOException {

    BlockIndex blockIndex = cacheBlock.getIndex(BlockIndex::new);
    if (blockIndex == null) {
      return null;
    }

    int accessCount = blockIndex.accessCount.incrementAndGet();

    // 1 is a power of two, but do not care about it
    if (accessCount >= 2 && isPowerOfTwo(accessCount)) {
      blockIndex.buildIndex(accessCount, cacheBlock, indexEntry);
      // the index just changed size, so tell the cache entry to re-weigh itself
      cacheBlock.indexWeightChanged();
    }

    if (blockIndex.blockIndex != null) {
      return blockIndex;
    }

    return null;
  }

  // Classic bit trick: a positive x is a power of two iff it has exactly one bit set.
  private static boolean isPowerOfTwo(int x) {
    return ((x > 0) && (x & (x - 1)) == 0);
  }

  // Number of times this block has been fetched; drives the power-of-two rebuild schedule above.
  private AtomicInteger accessCount = new AtomicInteger(0);
  // The index itself; volatile so readers see a fully-built array published by buildIndex().
  private volatile BlockIndexEntry[] blockIndex = null;

  /**
   * One entry of the block index: a byte position within the block, the key that immediately
   * precedes that position, and how many key/value entries remain in the block from there.
   */
  public static class BlockIndexEntry implements Comparable<BlockIndexEntry> {

    private Key prevKey;
    private int entriesLeft;
    private int pos;

    public BlockIndexEntry(int pos, int entriesLeft, Key prevKey) {
      this.pos = pos;
      this.entriesLeft = entriesLeft;
      this.prevKey = prevKey;
    }

    // Search-probe constructor: only prevKey is set, used as the binarySearch argument in
    // seekBlock(); pos/entriesLeft are left at their defaults and never read for probes.
    public BlockIndexEntry(Key key) {
      this.prevKey = key;
    }

    public int getEntriesLeft() {
      return entriesLeft;
    }

    @Override
    public int compareTo(BlockIndexEntry o) {
      return prevKey.compareTo(o.prevKey);
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof BlockIndexEntry) {
        return compareTo((BlockIndexEntry) o) == 0;
      }
      return false;
    }

    @Override
    public String toString() {
      return prevKey + " " + entriesLeft + " " + pos;
    }

    public Key getPrevKey() {
      return prevKey;
    }

    // NOTE(review): hashCode deliberately throws even though equals() is implemented, so these
    // entries must never be used in hash-based collections. This violates the equals/hashCode
    // contract on purpose; confirm no caller relies on hashing before changing it.
    @Override
    public int hashCode() {
      throw new UnsupportedOperationException("hashCode not designed");
    }

    /**
     * Estimated memory footprint of this entry (including its key) for cache accounting.
     */
    int weight() {
      int keyWeight = ClassSize.align(prevKey.getSize()) + ClassSize.OBJECT
          + SizeConstants.SIZEOF_LONG + 4 * (ClassSize.ARRAY + ClassSize.REFERENCE);
      return 2 * SizeConstants.SIZEOF_INT + ClassSize.REFERENCE + ClassSize.OBJECT + keyWeight;
    }
  }

  /**
   * Positions the given block read at the latest indexed entry whose previous key is strictly
   * before {@code startKey}, and returns that index entry. Returns null when the caller should
   * instead scan from the beginning of the block (startKey precedes everything indexed, or the
   * only candidate is the very first entry whose prevKey equals startKey).
   */
  public BlockIndexEntry seekBlock(Key startKey, CachedBlockRead cacheBlock) {

    // get a local ref to the index, another thread could change it
    BlockIndexEntry[] blockIndex = this.blockIndex;

    int pos = Arrays.binarySearch(blockIndex, new BlockIndexEntry(startKey));

    int index;

    if (pos < 0) {
      if (pos == -1) {
        // less than the first key in index; the first key in the block was not indexed, so just
        // return null... code calling this will scan from the beginning of the block
        return null;
      }
      // binarySearch returns (-(insertion point) - 1); this selects the entry BEFORE the
      // insertion point, i.e. the last indexed key smaller than startKey
      index = (pos * -1) - 2;
    } else {
      // found exact key in index
      index = pos;
      // back up past any earlier index entries that carry the same key, so seeking lands
      // before the first occurrence
      while (index > 0) {
        if (blockIndex[index].getPrevKey().equals(startKey)) {
          index--;
        } else {
          break;
        }
      }
    }

    // handle case where multiple keys in block are exactly the same, want to find the earliest key
    // in the index
    while (index - 1 > 0) {
      if (blockIndex[index].getPrevKey().equals(blockIndex[index - 1].getPrevKey())) {
        index--;
      } else {
        break;
      }
    }

    // landed on the first entry and its prevKey equals startKey: nothing indexed strictly
    // before startKey, so scan from the start of the block instead
    if (index == 0 && blockIndex[index].getPrevKey().equals(startKey)) {
      return null;
    }

    BlockIndexEntry bie = blockIndex[index];
    cacheBlock.seek(bie.pos);
    return bie;
  }

  /**
   * Scans the whole block once and records an index entry every {@code interval} key/value pairs,
   * where interval = totalEntries / indexEntries. Synchronized because multiple readers may race
   * to (re)build; a larger existing index is never replaced by a smaller one. Skips building
   * entirely when the interval would be 32 entries or fewer (not worth the memory).
   */
  private synchronized void buildIndex(int indexEntries, CachedBlockRead cacheBlock,
      IndexEntry indexEntry) throws IOException {
    cacheBlock.seek(0);

    RelativeKey rk = new RelativeKey();
    Value val = new Value();

    int interval = indexEntry.getNumEntries() / indexEntries;

    if (interval <= 32) {
      return;
    }

    // multiple threads could try to create the index with different sizes, do not replace a large
    // index with a smaller one
    if (this.blockIndex != null && this.blockIndex.length > indexEntries - 1) {
      return;
    }

    int count = 0;

    ArrayList<BlockIndexEntry> index = new ArrayList<>(indexEntries - 1);

    while (count < (indexEntry.getNumEntries() - interval + 1)) {
      // capture the key decoded on the PREVIOUS iteration together with the position BEFORE
      // reading, so each entry records "the key just before byte offset pos"
      Key myPrevKey = rk.getKey();
      int pos = cacheBlock.getPosition();
      rk.readFields(cacheBlock);
      val.readFields(cacheBlock);

      if (count > 0 && count % interval == 0) {
        index.add(new BlockIndexEntry(pos, indexEntry.getNumEntries() - count, myPrevKey));
      }

      count++;
    }

    // volatile write publishes the fully-built array to concurrent readers
    this.blockIndex = index.toArray(new BlockIndexEntry[index.size()]);

    // leave the block positioned at the start for the next reader
    cacheBlock.seek(0);
  }

  BlockIndexEntry[] getIndexEntries() {
    return blockIndex;
  }

  /**
   * Estimated memory footprint of the whole index (entries plus this object's own fields), used
   * by the block cache for eviction accounting.
   */
  @Override
  public synchronized int weight() {
    int weight = 0;
    if (blockIndex != null) {
      for (BlockIndexEntry blockIndexEntry : blockIndex) {
        weight += blockIndexEntry.weight();
      }
    }

    weight += ClassSize.ATOMIC_INTEGER + ClassSize.OBJECT + 2 * ClassSize.REFERENCE
        + ClassSize.ARRAY;
    return weight;
  }
}
9,663
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;

import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.FileSKVWriter;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
import org.apache.accumulo.core.file.rfile.RFile.RFileSKVIterator;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.accumulo.core.metadata.TabletFile;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.sample.impl.SamplerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Preconditions;

/**
 * {@link FileOperations} implementation for the RFile format: opens readers (plain, index-only,
 * scan-positioned) and writers over RFiles stored in a Hadoop {@link FileSystem}, wiring in block
 * caching, rate limiting, sampling, compression and encryption from the supplied options and
 * table configuration.
 */
public class RFileOperations extends FileOperations {

  private static final Logger LOG = LoggerFactory.getLogger(RFileOperations.class);

  // Shared sentinel for "no column family restriction" when seeking a fresh reader.
  private static final Collection<ByteSequence> EMPTY_CF_SET = Collections.emptySet();

  /**
   * Builds an RFile reader from the common per-open settings (filesystem path, Hadoop conf, file
   * length cache, block cache provider, rate limiter, crypto service).
   */
  private static RFileSKVIterator getReader(FileOptions options) throws IOException {
    CachableBuilder cb = new CachableBuilder()
        .fsPath(options.getFileSystem(), options.getFile().getPath(), options.dropCacheBehind)
        .conf(options.getConfiguration()).fileLen(options.getFileLenCache())
        .cacheProvider(options.cacheProvider).readLimiter(options.getRateLimiter())
        .cryptoService(options.getCryptoService());
    return RFile.getReader(cb, options.getFile());
  }

  @Override
  protected long getFileSize(FileOptions options) throws IOException {
    return options.getFileSystem().getFileStatus(options.getFile().getPath()).getLen();
  }

  /** Opens a reader positioned over the file's index entries only. */
  @Override
  protected FileSKVIterator openIndex(FileOptions options) throws IOException {
    return getReader(options).getIndex();
  }

  /**
   * Opens a full reader; when requested, seeks it to the very first key (unbounded range, no
   * column family filtering).
   */
  @Override
  protected FileSKVIterator openReader(FileOptions options) throws IOException {
    FileSKVIterator reader = getReader(options);
    if (options.isSeekToBeginning()) {
      // (Key) cast disambiguates the Range(Key, Key) constructor for a fully-open range
      reader.seek(new Range((Key) null, null), EMPTY_CF_SET, false);
    }
    return reader;
  }

  /** Opens a reader already seeked to the scan's range and column family set. */
  @Override
  protected FileSKVIterator openScanReader(FileOptions options) throws IOException {
    FileSKVIterator reader = getReader(options);
    reader.seek(options.getRange(), options.getColumnFamilies(), options.isRangeInclusive());
    return reader;
  }

  /**
   * Opens an RFile writer. Block sizes come from table configuration and must fit in an int.
   * If no output stream was supplied, one is created on the filesystem: replication and HDFS
   * block size honor table-level overrides over the Hadoop defaults, and when dropCacheBehind is
   * set the stream is created with SYNC_BLOCK and asked (best effort) to bypass the OS page
   * cache.
   */
  @Override
  protected FileSKVWriter openWriter(FileOptions options) throws IOException {
    AccumuloConfiguration acuconf = options.getTableConfiguration();

    long blockSize = acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE);
    Preconditions.checkArgument((blockSize < Integer.MAX_VALUE && blockSize > 0),
        "table.file.compress.blocksize must be greater than 0 and less than " + Integer.MAX_VALUE);
    long indexBlockSize = acuconf.getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX);
    Preconditions.checkArgument((indexBlockSize < Integer.MAX_VALUE && indexBlockSize > 0),
        "table.file.compress.blocksize.index must be greater than 0 and less than "
            + Integer.MAX_VALUE);

    SamplerConfigurationImpl samplerConfig = SamplerConfigurationImpl.newSamplerConfig(acuconf);
    Sampler sampler = null;

    if (samplerConfig != null) {
      sampler = SamplerFactory.newSampler(samplerConfig, acuconf, options.isAccumuloStartEnabled());
    }

    // explicit per-open compression wins; otherwise fall back to the table's configured type
    String compression = options.getCompression();
    compression = compression == null
        ? options.getTableConfiguration().get(Property.TABLE_FILE_COMPRESSION_TYPE) : compression;

    FSDataOutputStream outputStream = options.getOutputStream();

    Configuration conf = options.getConfiguration();

    if (outputStream == null) {
      int hrep = conf.getInt("dfs.replication", 3);
      int trep = acuconf.getCount(Property.TABLE_FILE_REPLICATION);
      // table-level replication (if set and different) overrides the Hadoop default
      int rep = hrep;
      if (trep > 0 && trep != hrep) {
        rep = trep;
      }
      long hblock = conf.getLong("dfs.block.size", 1 << 26);
      long tblock = acuconf.getAsBytes(Property.TABLE_FILE_BLOCK_SIZE);
      // table-level HDFS block size (if set) overrides the Hadoop default
      long block = hblock;
      if (tblock > 0) {
        block = tblock;
      }

      int bufferSize = conf.getInt("io.file.buffer.size", 4096);

      TabletFile file = options.getFile();
      FileSystem fs = options.getFileSystem();

      if (options.dropCacheBehind) {
        EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
        outputStream = fs.create(file.getPath(), FsPermission.getDefault(), set, bufferSize,
            (short) rep, block, null);
        try {
          // Tell the DataNode that the file does not need to be cached in the OS page cache
          outputStream.setDropBehind(Boolean.TRUE);
          LOG.trace("Called setDropBehind(TRUE) for stream writing file {}", options.file);
        } catch (UnsupportedOperationException e) {
          LOG.debug("setDropBehind not enabled for file: {}", options.file);
        } catch (IOException e) {
          LOG.debug("IOException setting drop behind for file: {}, msg: {}", options.file,
              e.getMessage());
        }
      } else {
        outputStream = fs.create(file.getPath(), false, bufferSize, (short) rep, block);
      }
    }

    BCFile.Writer _cbw = new BCFile.Writer(outputStream, options.getRateLimiter(), compression,
        conf, options.cryptoService);

    return new RFile.Writer(_cbw, (int) blockSize, (int) indexBlockSize, samplerConfig, sampler);
  }
}
9,664
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;

import org.apache.accumulo.core.cli.ConfigOpts;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.crypto.CryptoUtils;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
import org.apache.accumulo.core.file.rfile.RFile.Reader;
import org.apache.accumulo.core.file.rfile.bcfile.PrintBCInfo;
import org.apache.accumulo.core.file.rfile.bcfile.Utils;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.spi.crypto.NoFileEncrypter;
import org.apache.accumulo.core.summary.SummaryReader;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.accumulo.core.util.NumUtil;
import org.apache.accumulo.start.spi.KeywordExecutable;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.beust.jcommander.Parameter;
import com.google.auto.service.AutoService;

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

/**
 * The {@code accumulo rfile-info} command: prints diagnostic information about one or more RFiles
 * — index structure, meta blocks, crypto parameters, and optionally a dump of the key/value
 * pairs, visibility metrics, size histograms, key-length statistics, and summary data.
 */
@AutoService(KeywordExecutable.class)
public class PrintInfo implements KeywordExecutable {
  private static final Logger log = LoggerFactory.getLogger(PrintInfo.class);

  /** JCommander options for the rfile-info command. */
  static class Opts extends ConfigOpts {
    @Parameter(names = {"-d", "--dump"}, description = "dump the key/value pairs")
    boolean dump = false;
    @Parameter(names = {"--fullKeys"},
        description = "dump full keys regardless of length, do no truncate, implies --dump")
    boolean fullKeys = false;
    @Parameter(names = {"--formatter"},
        description = "specify a BiFunction<Key, Value, String> class to apply to rfile contents, implies --dump")
    String formatterClazz = null;
    @Parameter(names = {"-v", "--vis"}, description = "show visibility metrics")
    boolean vis = false;
    @Parameter(names = {"--visHash"}, description = "show visibilities as hashes, implies -v")
    boolean hash = false;
    @Parameter(names = {"--histogram"}, description = "print a histogram of the key-value sizes")
    boolean histogram = false;
    @Parameter(names = {"--printIndex"},
        description = "prints information about all the index entries")
    boolean printIndex = false;
    @Parameter(names = {"--useSample"},
        description = "Use sample data for --dump, --vis, --histogram options")
    boolean useSample = false;
    @Parameter(names = {"--summary"}, description = "Print summary data in file")
    boolean printSummary = false;
    @Parameter(names = {"--keyStats"},
        description = "print key length statistics for index and all data")
    boolean keyStats = false;
    @Parameter(description = " <file> { <file> ... }")
    List<String> files = new ArrayList<>();
    @Parameter(names = {"-c", "--config"}, variableArity = true,
        description = "Comma-separated Hadoop configuration files")
    List<String> configFiles = new ArrayList<>();
  }

  /**
   * Base-10 logarithmic histogram of sizes: bucket i counts values whose size has i decimal
   * digits. Eleven buckets cover sizes up to ~10^10.
   */
  static class LogHistogram {
    long[] countBuckets = new long[11];
    long[] sizeBuckets = new long[countBuckets.length];
    long totalSize = 0;

    // NOTE(review): size 0 would make log10 return -Infinity and the (int) cast index 0 is not
    // reached via a valid path — callers appear to pass positive sizes only; confirm.
    public void add(int size) {
      int bucket = (int) Math.log10(size);
      countBuckets[bucket]++;
      sizeBuckets[bucket] += size;
      totalSize += size;
    }

    /** Prints the histogram table, skipping the sub-10 bucket (index 0). */
    public void print(String indent) {
      System.out.println(indent + "Up to size Count %-age");
      for (int i = 1; i < countBuckets.length; i++) {
        System.out.printf("%s%11s : %10d %6.2f%%%n", indent,
            NumUtil.bigNumberForQuantity((long) Math.pow(10, i)), countBuckets[i],
            sizeBuckets[i] * 100. / totalSize);
      }
    }
  }

  /**
   * Aggregates key-length statistics: min/max/mean/stddev plus a log-scale size histogram.
   */
  static class KeyStats {
    private SummaryStatistics stats = new SummaryStatistics();
    private LogHistogram logHistogram = new LogHistogram();

    public void add(Key k) {
      int size = k.getSize();
      stats.addValue(size);
      logHistogram.add(size);
    }

    public void print(String indent) {
      logHistogram.print(indent);
      System.out.println();
      System.out.printf("%smin:%,11.2f max:%,11.2f avg:%,11.2f stddev:%,11.2f\n", indent,
          stats.getMin(), stats.getMax(), stats.getMean(), stats.getStandardDeviation());
    }
  }

  public static void main(String[] args) throws Exception {
    new PrintInfo().execute(args);
  }

  @Override
  public String keyword() {
    return "rfile-info";
  }

  @Override
  public String description() {
    return "Prints rfile info";
  }

  /**
   * Loads the user-supplied formatter class by name via this class's classloader and checks it
   * implements {@code BiFunction<Key,Value,String>}.
   */
  protected Class<? extends BiFunction<Key,Value,String>> getFormatter(String formatterClazz)
      throws ClassNotFoundException {
    @SuppressWarnings("unchecked")
    var clazz = (Class<? extends BiFunction<Key,Value,String>>) this.getClass().getClassLoader()
        .loadClass(formatterClazz).asSubclass(BiFunction.class);
    return clazz;
  }

  /**
   * Entry point: parses options, then for each file prints crypto params, index/meta-block info,
   * and whichever of the optional reports (dump, histogram, key stats, visibility metrics,
   * summaries) were requested. Exits the JVM on invalid argument combinations.
   */
  @SuppressFBWarnings(value = "DM_EXIT",
      justification = "System.exit is fine here because it's a utility class executed by a main()")
  @Override
  public void execute(final String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs("accumulo rfile-info", args);
    if (opts.files.isEmpty()) {
      System.err.println("No files were given");
      System.exit(1);
    }
    if ((opts.fullKeys || opts.dump) && opts.formatterClazz != null) {
      System.err.println(
          "--formatter argument is incompatible with --dump or --fullKeys, specify either, not both.");
      System.exit(1);
    }

    var siteConfig = opts.getSiteConfiguration();
    Configuration conf = new Configuration();
    for (String confFile : opts.configFiles) {
      log.debug("Adding Hadoop configuration file {}", confFile);
      conf.addResource(new Path(confFile));
    }

    LogHistogram kvHistogram = new LogHistogram();

    KeyStats dataKeyStats = new KeyStats();
    KeyStats indexKeyStats = new KeyStats();

    for (String arg : opts.files) {
      Path path = new Path(arg);
      FileSystem fs = resolveFS(log, conf, path);
      System.out
          .println("Reading file: " + path.makeQualified(fs.getUri(), fs.getWorkingDirectory()));

      printCryptoParams(path, fs);

      CryptoService cs = CryptoFactoryLoader.getServiceForClient(CryptoEnvironment.Scope.TABLE,
          siteConfig.getAllCryptoProperties());

      CachableBuilder cb = new CachableBuilder().fsPath(fs, path).conf(conf).cryptoService(cs);
      Reader iter = new RFile.Reader(cb);
      MetricsGatherer<Map<String,ArrayList<VisibilityMetric>>> vmg = new VisMetricsGatherer();

      // metrics must be registered BEFORE iterating so visibilities are collected during the scan
      if (opts.vis || opts.hash) {
        iter.registerMetrics(vmg);
      }

      iter.printInfo(opts.printIndex);
      System.out.println();

      String propsPath = opts.getPropertiesPath();
      String[] mainArgs =
          propsPath == null ? new String[] {arg} : new String[] {"-props", propsPath, arg};
      PrintBCInfo printBCInfo = new PrintBCInfo(mainArgs);
      printBCInfo.setCryptoService(cs);
      printBCInfo.printMetaBlockInfo();

      Map<String,ArrayList<ByteSequence>> localityGroupCF = null;

      // any option that needs the actual key/value data triggers a full scan below
      if (opts.histogram || opts.dump || opts.vis || opts.hash || opts.keyStats || opts.fullKeys
          || !StringUtils.isEmpty(opts.formatterClazz)) {
        localityGroupCF = iter.getLocalityGroupCF();

        FileSKVIterator dataIter;
        if (opts.useSample) {
          dataIter = iter.getSample();

          if (dataIter == null) {
            System.out.println("ERROR : This rfile has no sample data");
            return;
          }
        } else {
          dataIter = iter;
        }

        if (opts.keyStats) {
          FileSKVIterator indexIter = iter.getIndex();
          while (indexIter.hasTop()) {
            indexKeyStats.add(indexIter.getTopKey());
            indexIter.next();
          }
        }

        // pick at most one formatter: custom class > --fullKeys > --dump; null means no dump
        BiFunction<Key,Value,String> formatter = null;
        if (opts.formatterClazz != null) {
          final Class<? extends BiFunction<Key,Value,String>> formatterClass =
              getFormatter(opts.formatterClazz);
          formatter = formatterClass.getConstructor().newInstance();
        } else if (opts.fullKeys) {
          formatter = (key, value) -> key.toStringNoTruncate() + " -> " + value;
        } else if (opts.dump) {
          formatter = (key, value) -> key + " -> " + value;
        }

        // scan each locality group in turn, feeding dump/histogram/key-stat collectors
        for (String lgName : localityGroupCF.keySet()) {
          LocalityGroupUtil.seek(dataIter, new Range(), lgName, localityGroupCF);
          while (dataIter.hasTop()) {
            Key key = dataIter.getTopKey();
            Value value = dataIter.getTopValue();
            if (formatter != null) {
              System.out.println(formatter.apply(key, value));
              // stdout closed (e.g. piped to head); stop early
              if (System.out.checkError()) {
                return;
              }
            }
            if (opts.histogram) {
              kvHistogram.add(key.getSize() + value.getSize());
            }
            if (opts.keyStats) {
              dataKeyStats.add(key);
            }
            dataIter.next();
          }
        }
      }

      if (opts.printSummary) {
        SummaryReader.print(iter, System.out);
      }

      iter.close();

      if (opts.vis || opts.hash) {
        System.out.println();
        vmg.printMetrics(opts.hash, "Visibility", System.out);
      }

      if (opts.histogram) {
        System.out.println();
        kvHistogram.print("");
      }

      if (opts.keyStats) {
        System.out.println();
        System.out.println("Statistics for keys in data :");
        dataKeyStats.print("\t");
        System.out.println();
        System.out.println("Statistics for keys in index :");
        indexKeyStats.print("\t");
      }

      // If the output stream has closed, there is no reason to keep going.
      if (System.out.checkError()) {
        return;
      }
    }
  }

  /**
   * Resolves which {@link FileSystem} a path lives on. Paths with a scheme (":" present) use
   * their own filesystem; bare paths are probed on the default (HDFS) filesystem first, falling
   * back to the local filesystem if not found there.
   */
  public static FileSystem resolveFS(Logger log, Configuration conf, Path file) throws IOException {
    FileSystem hadoopFs = FileSystem.get(conf);
    FileSystem localFs = FileSystem.getLocal(conf);
    FileSystem fs;
    if (file.toString().contains(":")) {
      fs = file.getFileSystem(conf);
    } else {
      log.warn(
          "Attempting to find file across filesystems. Consider providing URI instead of path");
      fs = hadoopFs.exists(file) ? hadoopFs : localFs; // fall back to local
    }
    return fs;
  }

  /**
   * Print the unencrypted parameters that tell the Crypto Service how to decrypt the file. This
   * information is useful for debugging if and how a file was encrypted.
   */
  private void printCryptoParams(Path path, FileSystem fs) {
    byte[] noCryptoBytes = new NoFileEncrypter().getDecryptionParameters();
    try (FSDataInputStream fsDis = fs.open(path)) {
      long fileLength = fs.getFileStatus(path).getLen();
      // the crypto-params offset is stored at a fixed distance from the end of the file:
      // 16 bytes + version block + one long (the offset itself)
      fsDis.seek(fileLength - 16 - Utils.Version.size() - Long.BYTES);
      long cryptoParamOffset = fsDis.readLong();
      fsDis.seek(cryptoParamOffset);
      byte[] cryptoParams = CryptoUtils.readParams(fsDis);
      if (Arrays.equals(noCryptoBytes, cryptoParams)) {
        System.out.println("No on disk encryption detected.");
      } else {
        System.out.println("Encrypted with Params: "
            + Key.toPrintableString(cryptoParams, 0, cryptoParams.length, cryptoParams.length));
      }
    } catch (IOException ioe) {
      log.error("Error reading crypto params", ioe);
    }
  }
}
9,665
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/CompressionAlgorithm.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.FilterOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.accumulo.core.spi.file.rfile.compression.CompressionAlgorithmConfiguration; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionInputStream; import org.apache.hadoop.io.compress.CompressionOutputStream; import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.benmanes.caffeine.cache.Caffeine; import com.github.benmanes.caffeine.cache.LoadingCache; import com.google.common.collect.Maps; /** * There is a static initializer in {@link Compression} that finds all implementations of * {@link 
CompressionAlgorithmConfiguration} and initializes a {@link CompressionAlgorithm} * instance. This promotes a model of the following call graph of initialization by the static * initializer, followed by calls to {@link #getCodec()}, * {@link #createCompressionStream(OutputStream, Compressor, int)}, and * {@link #createDecompressionStream(InputStream, Decompressor, int)}. In some cases, the * compression and decompression call methods will include a different buffer size for the stream. * Note that if the compressed buffer size requested in these calls is zero, we will not set the * buffer size for that algorithm. Instead, we will use the default within the codec. * <p> * The buffer size is configured in the Codec by way of a Hadoop {@link Configuration} reference. * One approach may be to use the same Configuration object, but when calls are made to * {@code createCompressionStream} and {@code createDecompressionStream} with non default buffer * sizes, the configuration object must be changed. In this case, concurrent calls to * {@code createCompressionStream} and {@code createDecompressionStream} would mutate the * configuration object beneath each other, requiring synchronization to avoid undesirable activity * via co-modification. To avoid synchronization entirely, we will create Codecs with their own * Configuration object and cache them for re-use. A default codec will be statically created, as * mentioned above to ensure we always have a codec available at loader initialization. * <p> * There is a Guava cache defined within Algorithm that allows us to cache Codecs for re-use. Since * they will have their own configuration object and thus do not need to be mutable, there is no * concern for using them concurrently; however, the Guava cache exists to ensure a maximal size of * the cache and efficient and concurrent read/write access to the cache itself. 
 * <p>
 * To provide Algorithm specific details and to describe what is in code:
 * <p>
 * LZO will always have the default LZO codec because the buffer size is never overridden within it.
 * <p>
 * LZ4 will always have the default LZ4 codec because the buffer size is never overridden within it.
 * <p>
 * GZ will use the default GZ codec for the compression stream, but can potentially use a different
 * codec instance for the decompression stream if the requested buffer size does not match the
 * default GZ buffer size of 32k.
 * <p>
 * Snappy will use the default Snappy codec with the default buffer size of 64k for the compression
 * stream, but will use a cached codec if the buffer size differs from the default.
 */
public class CompressionAlgorithm extends Configured {

  /**
   * Output stream wrapper that "finishes" the wrapped {@link CompressionOutputStream} on
   * {@link #flush()} instead of waiting for close, so a compressed block can be completed and
   * pushed downstream while the underlying file stream stays open for subsequent blocks.
   */
  public static class FinishOnFlushCompressionStream extends FilterOutputStream {

    FinishOnFlushCompressionStream(CompressionOutputStream cout) {
      super(cout);
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
      // Delegate the whole-array write directly; FilterOutputStream's default implementation
      // would forward it one byte at a time.
      out.write(b, off, len);
    }

    @Override
    public void flush() throws IOException {
      CompressionOutputStream cout = (CompressionOutputStream) out;
      // Finish the current compression block, flush it downstream, then reset the codec state so
      // the stream can continue to be used for the next block.
      cout.finish();
      cout.flush();
      cout.resetState();
    }
  }

  private static final Logger LOG = LoggerFactory.getLogger(CompressionAlgorithm.class);

  /**
   * Caffeine cache of codecs keyed by (algorithm, buffer size). Provides a bounded factory for
   * codecs configured with non-default buffer sizes; missing entries are created on demand via
   * {@link #createNewCodec(int)}. Each cached codec owns its own Configuration, so no
   * synchronization is needed around buffer-size settings.
   */
  private static LoadingCache<Entry<CompressionAlgorithm,Integer>,CompressionCodec> codecCache =
      Caffeine.newBuilder().maximumSize(25)
          .build(key -> key.getKey().createNewCodec(key.getValue()));

  // Data input buffer size to absorb small reads from application.
  protected static final int DATA_IBUF_SIZE = 1024;

  // Data output buffer size to absorb small writes from application.
  protected static final int DATA_OBUF_SIZE = 4 * 1024;

  // The configuration (name, codec class, and buffer size properties) of this algorithm.
  private final CompressionAlgorithmConfiguration algorithm;

  // Guards one-time creation of the default codec in initCodec(); only used during construction.
  private final AtomicBoolean checked = new AtomicBoolean(false);

  // Default codec for this algorithm; remains null when the codec class cannot be loaded,
  // in which case isSupported() reports false.
  private transient CompressionCodec codec = null;

  public CompressionAlgorithm(CompressionAlgorithmConfiguration algorithm, Configuration conf) {
    this.algorithm = algorithm;
    setConf(conf);
    codec = initCodec(checked, algorithm.getDefaultBufferSize(), codec);
  }

  /**
   * Shared function to create new codec objects. It is expected that if buffersize is invalid, a
   * codec will be created with the default buffer size.
   */
  CompressionCodec createNewCodec(int bufferSize) {
    return createNewCodec(algorithm.getCodecClassNameProperty(), algorithm.getCodecClassName(),
        bufferSize, algorithm.getBufferSizeProperty());
  }

  /**
   * Creates a buffered, decompressing input stream over {@code downStream}.
   *
   * @param downStream the raw (compressed) input
   * @param decompressor decompressor to use, typically from {@link #getDecompressor()}
   * @param downStreamBufferSize buffer size for reading the raw input; 0 disables that buffer
   * @throws IOException if this algorithm's codec class is not available
   */
  public InputStream createDecompressionStream(InputStream downStream, Decompressor decompressor,
      int downStreamBufferSize) throws IOException {
    if (!isSupported()) {
      throw new IOException("codec class not specified. Did you forget to set property "
          + algorithm.getCodecClassNameProperty() + "?");
    }
    if (algorithm.cacheCodecsWithNonDefaultSizes()) {
      // e.g. GZ/Snappy: a non-default buffer size requires a differently-configured codec,
      // which is retrieved from (or created in) the codec cache.
      return createDecompressionStream(downStream, decompressor, downStreamBufferSize,
          algorithm.getDefaultBufferSize(), this, codec);
    } else {
      InputStream bis = bufferStream(downStream, downStreamBufferSize);
      CompressionInputStream cis = codec.createInputStream(bis, decompressor);
      return new BufferedInputStream(cis, DATA_IBUF_SIZE);
    }
  }

  private InputStream createDecompressionStream(final InputStream stream,
      final Decompressor decompressor, final int bufferSize, final int defaultBufferSize,
      final CompressionAlgorithm algorithm, CompressionCodec codec) throws IOException {
    // If the default buffer size is not being used, pull from the loading cache.
    if (bufferSize != defaultBufferSize) {
      Entry<CompressionAlgorithm,Integer> sizeOpt = Maps.immutableEntry(algorithm, bufferSize);
      codec = codecCache.get(sizeOpt);
    }
    CompressionInputStream cis = codec.createInputStream(stream, decompressor);
    return new BufferedInputStream(cis, DATA_IBUF_SIZE);
  }

  /**
   * Creates a buffered, compressing output stream over {@code downStream}. The returned stream
   * finishes the current compression block on flush (see {@link FinishOnFlushCompressionStream}).
   *
   * @param downStream the raw output to receive compressed bytes
   * @param compressor compressor to use, typically from {@link #getCompressor()}
   * @param downStreamBufferSize buffer size for the raw output; 0 disables that buffer
   * @throws IOException if this algorithm's codec class is not available
   */
  public OutputStream createCompressionStream(OutputStream downStream, Compressor compressor,
      int downStreamBufferSize) throws IOException {
    if (!isSupported()) {
      throw new IOException("codec class not specified. Did you forget to set property "
          + algorithm.getCodecClassNameProperty() + "?");
    }
    return createFinishedOnFlushCompressionStream(downStream, compressor, downStreamBufferSize);
  }

  /**
   * Whether this algorithm can be used, i.e. its codec class was successfully loaded.
   */
  boolean isSupported() {
    return codec != null;
  }

  /**
   * The default codec for this algorithm, or null if unsupported.
   */
  CompressionCodec getCodec() {
    return codec;
  }

  /**
   * Borrows a {@link Compressor} for this algorithm's codec from the Hadoop {@link CodecPool},
   * or returns null if the codec is unavailable. Return it via {@link #returnCompressor}.
   */
  public Compressor getCompressor() {
    CompressionCodec codec = getCodec();
    if (codec != null) {
      Compressor compressor = CodecPool.getCompressor(codec);
      if (compressor != null) {
        if (compressor.finished()) {
          // Somebody returns the compressor to CodecPool but is still using it.
          LOG.warn("Compressor obtained from CodecPool already finished()");
        } else {
          LOG.trace("Got a compressor: {}", compressor.hashCode());
        }
        // The following statement is necessary to get around bugs in 0.18 where a compressor is
        // referenced after it's returned back to the codec pool.
        compressor.reset();
      }
      return compressor;
    }
    return null;
  }

  /**
   * Returns the specified {@link Compressor} to the codec pool if it is not null.
   */
  public void returnCompressor(final Compressor compressor) {
    if (compressor != null) {
      LOG.trace("Return a compressor: {}", compressor.hashCode());
      CodecPool.returnCompressor(compressor);
    }
  }

  /**
   * Borrows a {@link Decompressor} for this algorithm's codec from the Hadoop {@link CodecPool},
   * or returns null if the codec is unavailable. Return it via {@link #returnDecompressor}.
   */
  public Decompressor getDecompressor() {
    CompressionCodec codec = getCodec();
    if (codec != null) {
      Decompressor decompressor = CodecPool.getDecompressor(codec);
      if (decompressor != null) {
        if (decompressor.finished()) {
          // Somebody returns the decompressor to CodecPool but is still using it.
          LOG.warn("Decompressor obtained from CodecPool already finished()");
        } else {
          LOG.trace("Got a decompressor: {}", decompressor.hashCode());
        }
        // The following statement is necessary to get around bugs in 0.18 where a decompressor is
        // referenced after it's returned back to the codec pool.
        decompressor.reset();
      }
      return decompressor;
    }
    return null;
  }

  /**
   * Returns the specified {@link Decompressor} to the codec pool if it is not null.
   */
  public void returnDecompressor(final Decompressor decompressor) {
    if (decompressor != null) {
      LOG.trace("Returned a decompressor: {}", decompressor.hashCode());
      CodecPool.returnDecompressor(decompressor);
    }
  }

  /**
   * Returns the name of the compression algorithm.
   *
   * @return the name
   */
  public String getName() {
    return algorithm.getName();
  }

  /**
   * Initializes and returns a new codec with the specified buffer size if and only if the specified
   * {@link AtomicBoolean} has a value of false, or returns the specified original codec otherwise.
   * Only invoked from the constructor, so the check-then-set is not required to be atomic.
   */
  private CompressionCodec initCodec(final AtomicBoolean checked, final int bufferSize,
      final CompressionCodec originalCodec) {
    if (!checked.get()) {
      checked.set(true);
      return createNewCodec(bufferSize);
    }
    return originalCodec;
  }

  /**
   * Returns a new {@link CompressionCodec} of the specified type, or the default type if no primary
   * type is specified. If the specified buffer size is greater than 0, the specified buffer size
   * configuration option will be updated in the codec's configuration with the buffer size. If
   * neither the specified codec type nor the default codec type can be found, null will be
   * returned.
   */
  private CompressionCodec createNewCodec(final String codecClazzProp, final String defaultClazz,
      final int bufferSize, final String bufferSizeConfigOpt) {
    String clazz = defaultClazz;
    if (codecClazzProp != null) {
      // A system property takes precedence over the Hadoop configuration for the codec class.
      clazz = System.getProperty(codecClazzProp, getConf().get(codecClazzProp, defaultClazz));
    }
    try {
      LOG.info("Trying to load codec class {}", clazz);
      // Each codec gets a private Configuration copy so buffer-size settings never race.
      Configuration config = new Configuration(getConf());
      updateBuffer(config, bufferSizeConfigOpt, bufferSize);
      return (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), config);
    } catch (ClassNotFoundException e) {
      LOG.debug(
          "ClassNotFoundException creating codec class {} for {}. Enable trace logging for stacktrace.",
          clazz, codecClazzProp);
      LOG.trace("Unable to load codec class due to ", e);
    }
    return null;
  }

  /**
   * Returns a new {@link FinishOnFlushCompressionStream} initialized for the specified output
   * stream and compressor.
   */
  private OutputStream createFinishedOnFlushCompressionStream(final OutputStream downStream,
      final Compressor compressor, final int downStreamBufferSize) throws IOException {
    OutputStream out = bufferStream(downStream, downStreamBufferSize);
    CompressionOutputStream cos = getCodec().createOutputStream(out, compressor);
    return new BufferedOutputStream(new FinishOnFlushCompressionStream(cos), DATA_OBUF_SIZE);
  }

  /**
   * Return the given stream wrapped as a {@link BufferedOutputStream} with the given buffer size if
   * the buffer size is greater than 0, or return the original stream otherwise.
   */
  private OutputStream bufferStream(final OutputStream stream, final int bufferSize) {
    if (bufferSize > 0) {
      return new BufferedOutputStream(stream, bufferSize);
    }
    return stream;
  }

  /**
   * Return the given stream wrapped as a {@link BufferedInputStream} with the given buffer size if
   * the buffer size is greater than 0, or return the original stream otherwise.
   */
  private InputStream bufferStream(final InputStream stream, final int bufferSize) {
    if (bufferSize > 0) {
      return new BufferedInputStream(stream, bufferSize);
    }
    return stream;
  }

  /**
   * Updates the value of the specified buffer size opt in the given {@link Configuration} if the
   * new buffer size is greater than 0.
   */
  private void updateBuffer(final Configuration config, final String bufferSizeOpt,
      final int bufferSize) {
    // Use the buffersize only if it is greater than 0, otherwise use the default defined within
    // the codec.
    if (bufferSize > 0) {
      config.setInt(bufferSizeOpt, bufferSize);
    }
  }
}
9,666
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Utils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile.bcfile;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;

/**
 * Supporting Utility classes used by TFile, and shared by users of TFile. Provides a compact
 * variable-length integer encoding (1-9 bytes for a long), length-prefixed string serialization,
 * and a simple major.minor {@link Version} record for meta blocks.
 */
public final class Utils {

  /**
   * Prevent the instantiation of Utils.
   */
  private Utils() {
    // nothing
  }

  /**
   * Encoding an integer into a variable-length encoding format. Synonymous to
   * <code>Utils#writeVLong(out, n)</code>.
   *
   * @param out output stream
   * @param n The integer to be encoded
   * @see Utils#writeVLong(DataOutput, long)
   */
  public static void writeVInt(DataOutput out, int n) throws IOException {
    writeVLong(out, n);
  }

  /**
   * Encoding a Long integer into a variable-length encoding format. The first byte determines the
   * format; any payload bytes are big-endian:
   * <ul>
   * <li>if n in [-32, 128): one byte holding the value itself.
   * <li>if n in [-20*2^8, 20*2^8): two bytes; {@code byte[0] = n/256 - 52} (in [-72, -33)),
   * followed by the low 8 bits of n.
   * <li>if n in [-16*2^16, 16*2^16): three bytes; {@code byte[0] = n/2^16 - 88} (in [-104, -73)),
   * followed by the low 16 bits of n.
   * <li>if n in [-8*2^24, 8*2^24): four bytes; {@code byte[0] = n/2^24 - 112} (in [-120, -105)),
   * followed by the low 24 bits of n.
   * <li>otherwise: {@code byte[0] = len - 129} where {@code len} in [4, 8] is the number of
   * payload bytes, followed by the {@code len} lowest bytes of n as a signed big-endian integer.
   * </ul>
   *
   * @param out output stream
   * @param n the integer number
   */
  @SuppressWarnings("fallthrough")
  public static void writeVLong(DataOutput out, long n) throws IOException {
    if ((n < 128) && (n >= -32)) {
      // Single-byte fast path.
      out.writeByte((int) n);
      return;
    }

    long un = (n < 0) ? ~n : n;
    // how many bytes do we need to represent the number with sign bit?
    int len = (Long.SIZE - Long.numberOfLeadingZeros(un)) / 8 + 1;
    int firstByte = (int) (n >> ((len - 1) * 8));
    // Each case below intentionally falls through after shifting firstByte, promoting the value
    // to the next-wider format when it does not fit the current one.
    switch (len) {
      case 1:
        // fall it through to firstByte==-1, len=2.
        firstByte >>= 8;
      case 2:
        if ((firstByte < 20) && (firstByte >= -20)) {
          out.writeByte(firstByte - 52);
          out.writeByte((int) n);
          return;
        }
        // fall it through to firstByte==0/-1, len=3.
        firstByte >>= 8;
      case 3:
        if ((firstByte < 16) && (firstByte >= -16)) {
          out.writeByte(firstByte - 88);
          out.writeShort((int) n);
          return;
        }
        // fall it through to firstByte==0/-1, len=4.
        firstByte >>= 8;
      case 4:
        if ((firstByte < 8) && (firstByte >= -8)) {
          out.writeByte(firstByte - 112);
          out.writeShort(((int) n) >>> 8);
          out.writeByte((int) n);
          return;
        }
        out.writeByte(len - 129);
        out.writeInt((int) n);
        return;
      case 5:
        out.writeByte(len - 129);
        out.writeInt((int) (n >>> 8));
        out.writeByte((int) n);
        return;
      case 6:
        out.writeByte(len - 129);
        out.writeInt((int) (n >>> 16));
        out.writeShort((int) n);
        return;
      case 7:
        out.writeByte(len - 129);
        out.writeInt((int) (n >>> 24));
        out.writeShort((int) (n >>> 8));
        out.writeByte((int) n);
        return;
      case 8:
        out.writeByte(len - 129);
        out.writeLong(n);
        return;
      default:
        throw new IllegalStateException("Internal error");
    }
  }

  /**
   * Decoding the variable-length integer. Synonymous to <code>(int)Utils#readVLong(in)</code>.
   *
   * @param in input stream
   * @return the decoded integer
   * @throws IllegalStateException if the decoded value does not fit in an int
   *
   * @see Utils#readVLong(DataInput)
   */
  public static int readVInt(DataInput in) throws IOException {
    long ret = readVLong(in);
    if ((ret > Integer.MAX_VALUE) || (ret < Integer.MIN_VALUE)) {
      throw new IllegalStateException("Number too large to be represented as Integer");
    }
    return (int) ret;
  }

  /**
   * Decoding the variable-length integer written by {@link #writeVLong(DataOutput, long)}. With FB
   * the (signed) first byte and NB[*] the following bytes:
   * <ul>
   * <li>if {@code FB >= -32}: the value is FB itself.
   * <li>if FB in [-72, -33]: the value is {@code (FB + 52) << 8 | NB[0]}.
   * <li>if FB in [-104, -73]: the value is {@code (FB + 88) << 16} plus the next 16 bits.
   * <li>if FB in [-120, -105]: the value is {@code (FB + 112) << 24} plus the next 24 bits.
   * <li>if FB in [-128, -121]: the value is the next {@code FB + 129} bytes interpreted as a
   * signed big-endian integer.
   * </ul>
   *
   * @param in input stream
   * @return the decoded long integer.
   */
  public static long readVLong(DataInput in) throws IOException {
    int firstByte = in.readByte();
    if (firstByte >= -32) {
      return firstByte;
    }

    // Dispatch on (firstByte + 128) / 8, which buckets the format ranges described above.
    switch ((firstByte + 128) / 8) {
      case 11:
      case 10:
      case 9:
      case 8:
      case 7:
        // Two-byte format.
        return ((firstByte + 52L) << 8) | in.readUnsignedByte();
      case 6:
      case 5:
      case 4:
      case 3:
        // Three-byte format.
        return ((firstByte + 88L) << 16) | in.readUnsignedShort();
      case 2:
      case 1:
        // Four-byte format.
        return ((firstByte + 112L) << 24) | (in.readUnsignedShort() << 8) | in.readUnsignedByte();
      case 0:
        // Explicit-length format: firstByte encodes the payload byte count.
        int len = firstByte + 129;
        switch (len) {
          case 4:
            return in.readInt();
          case 5:
            return ((long) in.readInt()) << 8 | in.readUnsignedByte();
          case 6:
            return ((long) in.readInt()) << 16 | in.readUnsignedShort();
          case 7:
            return ((long) in.readInt()) << 24 | (in.readUnsignedShort() << 8)
                | in.readUnsignedByte();
          case 8:
            return in.readLong();
          default:
            throw new IOException("Corrupted VLong encoding");
        }
      default:
        throw new IllegalStateException("Internal error");
    }
  }

  /**
   * Write a String as a VInt n, followed by n Bytes as in Text format. A null string is encoded
   * as length -1 with no payload.
   */
  public static void writeString(DataOutput out, String s) throws IOException {
    if (s != null) {
      Text text = new Text(s);
      byte[] buffer = text.getBytes();
      int len = text.getLength();
      writeVInt(out, len);
      out.write(buffer, 0, len);
    } else {
      writeVInt(out, -1);
    }
  }

  /**
   * Read a String as a VInt n, followed by n Bytes in Text format.
   *
   * @param in The input stream.
   * @return The string, or null if the encoded length was -1
   */
  public static String readString(DataInput in) throws IOException {
    int length = readVInt(in);
    if (length == -1) {
      return null;
    }
    byte[] buffer = new byte[length];
    in.readFully(buffer);
    return Text.decode(buffer);
  }

  /**
   * A generic Version class. We suggest applications built on top of TFile use this class to
   * maintain version information in their meta blocks.
   *
   * A version number consists of a major version and a minor version. The suggested usage of major
   * and minor version number is to increment major version number when the new storage format is
   * not backward compatible, and increment the minor version otherwise.
   */
  public static final class Version implements Comparable<Version> {
    private final short major;
    private final short minor;

    /**
     * Construct the Version object by reading from the input stream.
     *
     * @param in input stream
     */
    public Version(DataInput in) throws IOException {
      major = in.readShort();
      minor = in.readShort();
    }

    /**
     * Constructor.
     *
     * @param major major version.
     * @param minor minor version.
     */
    public Version(short major, short minor) {
      this.major = major;
      this.minor = minor;
    }

    /**
     * Write the object to a DataOutput. The serialized format of the Version is major version
     * followed by minor version, both as big-endian short integers.
     *
     * @param out The DataOutput object.
     */
    public void write(DataOutput out) throws IOException {
      out.writeShort(major);
      out.writeShort(minor);
    }

    /**
     * Get the size of the serialized Version object.
     *
     * @return serialized size of the version object.
     */
    public static int size() {
      return (Short.SIZE + Short.SIZE) / Byte.SIZE;
    }

    @Override
    public String toString() {
      return new StringBuilder("v").append(major).append(".").append(minor).toString();
    }

    /**
     * Test compatibility.
     *
     * @param other The Version object to test compatibility with.
     * @return true if both versions have the same major version number; false otherwise.
     */
    public boolean compatibleWith(Version other) {
      return major == other.major;
    }

    @Override
    public int compareTo(Version that) {
      // Subtraction is safe here: shorts widen to int, so it cannot overflow.
      if (major != that.major) {
        return major - that.major;
      }
      return minor - that.minor;
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      }
      if (!(other instanceof Version)) {
        return false;
      }
      return compareTo((Version) other) == 0;
    }

    @Override
    public int hashCode() {
      return ((major << 16) + minor);
    }
  }
}
9,667
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/MetaBlockDoesNotExist.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.io.IOException; import java.io.UncheckedIOException; /** * Exception - No such Meta Block with the given name. */ public class MetaBlockDoesNotExist extends IOException { private static final long serialVersionUID = -1365588883639715005L; /** * Constructor * * @param s message. */ MetaBlockDoesNotExist(String s) { super(s); } public MetaBlockDoesNotExist(UncheckedIOException uioe) { super(uioe); } }
9,668
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.util.List; import java.util.Map; import java.util.ServiceLoader; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; import org.apache.accumulo.core.spi.file.rfile.compression.Bzip2; import org.apache.accumulo.core.spi.file.rfile.compression.CompressionAlgorithmConfiguration; import org.apache.accumulo.core.spi.file.rfile.compression.Gz; import org.apache.accumulo.core.spi.file.rfile.compression.Lz4; import org.apache.accumulo.core.spi.file.rfile.compression.Lzo; import org.apache.accumulo.core.spi.file.rfile.compression.NoCompression; import org.apache.accumulo.core.spi.file.rfile.compression.Snappy; import org.apache.accumulo.core.spi.file.rfile.compression.ZStandard; import org.apache.hadoop.conf.Configuration; /** * Compression related stuff. */ public final class Compression { /** * Prevent the instantiation of this class. */ private Compression() { throw new UnsupportedOperationException(); } // All compression-related settings are required to be configured statically in the // Configuration object. 
protected static final Configuration conf = new Configuration(); private static final ServiceLoader<CompressionAlgorithmConfiguration> FOUND_ALGOS = ServiceLoader.load(CompressionAlgorithmConfiguration.class); private static final Set<CompressionAlgorithmConfiguration> BUILTIN_ALGOS = Set.of(new Gz(), new Bzip2(), new Lz4(), new Lzo(), new NoCompression(), new Snappy(), new ZStandard()); private static final Map<String, CompressionAlgorithm> CONFIGURED_ALGORITHMS = Stream .concat(BUILTIN_ALGOS.stream(), StreamSupport.stream(FOUND_ALGOS.spliterator(), false)) .map(a -> new CompressionAlgorithm(a, conf)) .collect(Collectors.toMap(CompressionAlgorithm::getName, Function.identity())); public static List<String> getSupportedAlgorithms() { return CONFIGURED_ALGORITHMS.entrySet().stream().filter(e -> e.getValue().isSupported()) .map(Map.Entry::getKey).collect(Collectors.toList()); } public static CompressionAlgorithm getCompressionAlgorithmByName(final String name) { CompressionAlgorithm algorithm = CONFIGURED_ALGORITHMS.get(name); if (algorithm != null) { return algorithm; } throw new IllegalArgumentException("Unsupported compression algorithm name: " + name); } }
9,669
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/MetaBlockAlreadyExists.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.io.IOException; /** * Exception - Meta Block with the same name already exists. */ public class MetaBlockAlreadyExists extends IOException { private static final long serialVersionUID = -6797037044124244666L; /** * Constructor * * @param s message. */ MetaBlockAlreadyExists(String s) { super(s); } }
9,670
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/SimpleBufferedOutputStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.io.FilterOutputStream; import java.io.IOException; import java.io.OutputStream; /** * A simplified BufferedOutputStream with borrowed buffer, and allow users to see how much data have * been buffered. */ class SimpleBufferedOutputStream extends FilterOutputStream { protected byte[] buf; // the borrowed buffer protected int count = 0; // bytes used in buffer. // Constructor public SimpleBufferedOutputStream(OutputStream out, byte[] buf) { super(out); this.buf = buf; } private void flushBuffer() throws IOException { if (count > 0) { out.write(buf, 0, count); count = 0; } } @Override public void write(int b) throws IOException { if (count >= buf.length) { flushBuffer(); } buf[count++] = (byte) b; } @Override public void write(byte[] b, int off, int len) throws IOException { if (len >= buf.length) { flushBuffer(); out.write(b, off, len); return; } if (len > buf.length - count) { flushBuffer(); } System.arraycopy(b, off, buf, count, len); count += len; } @Override public synchronized void flush() throws IOException { flushBuffer(); out.flush(); } // Get the size of internal buffer being used. public int size() { return count; } }
9,671
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Map; import java.util.TreeMap; import org.apache.accumulo.core.crypto.CryptoEnvironmentImpl; import org.apache.accumulo.core.crypto.CryptoUtils; import org.apache.accumulo.core.file.rfile.bcfile.Utils.Version; import org.apache.accumulo.core.file.streams.BoundedRangeFileInputStream; import org.apache.accumulo.core.file.streams.RateLimitedOutputStream; import org.apache.accumulo.core.file.streams.SeekableDataInputStream; import org.apache.accumulo.core.spi.crypto.CryptoEnvironment; import org.apache.accumulo.core.spi.crypto.CryptoEnvironment.Scope; import org.apache.accumulo.core.spi.crypto.CryptoService; import org.apache.accumulo.core.spi.crypto.FileDecrypter; import org.apache.accumulo.core.spi.crypto.FileEncrypter; import 
org.apache.accumulo.core.spi.crypto.NoFileDecrypter; import org.apache.accumulo.core.spi.crypto.NoFileEncrypter; import org.apache.accumulo.core.util.ratelimit.RateLimiter; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; /** * Block Compressed file, the underlying physical storage layer for TFile. BCFile provides the basic * block level compression for the data block and meta blocks. It is separated from TFile as it may * be used for other block-compressed file implementation. */ public final class BCFile { // the current version of BCFile impl, increment them (major or minor) made // enough changes /** * Simplified encryption interface. Allows more flexible encryption. * * @since 2.0 */ static final Version API_VERSION_3 = new Version((short) 3, (short) 0); /** * Experimental crypto parameters, not flexible. Do not use. */ static final Version API_VERSION_2 = new Version((short) 2, (short) 0); /** * Original BCFile version, prior to encryption. Also, any files before 2.0 that didn't have * encryption were marked with this version. */ static final Version API_VERSION_1 = new Version((short) 1, (short) 0); static final Log LOG = LogFactory.getLog(BCFile.class); private static final String FS_OUTPUT_BUF_SIZE_ATTR = "tfile.fs.output.buffer.size"; private static final String FS_INPUT_BUF_SIZE_ATTR = "tfile.fs.input.buffer.size"; private static int getFSOutputBufferSize(Configuration conf) { return conf.getInt(FS_OUTPUT_BUF_SIZE_ATTR, 256 * 1024); } private static int getFSInputBufferSize(Configuration conf) { return conf.getInt(FS_INPUT_BUF_SIZE_ATTR, 32 * 1024); } /** * Prevent the instantiation of BCFile objects. 
*/ private BCFile() { // nothing } /** * BCFile writer, the entry point for creating a new BCFile. */ public static class Writer implements Closeable { private final RateLimitedOutputStream out; private final Configuration conf; private FileEncrypter encrypter; private CryptoEnvironmentImpl cryptoEnvironment; // the single meta block containing index of compressed data blocks final DataIndex dataIndex; // index for meta blocks final MetaIndex metaIndex; boolean blkInProgress = false; private boolean metaBlkSeen = false; private boolean closed = false; long errorCount = 0; // reusable buffers. private BytesWritable fsOutputBuffer; private long length = 0; public long getLength() { return this.length; } /** * Intermediate class that maintain the state of a Writable Compression Block. */ private static final class WBlockState { private final CompressionAlgorithm compressAlgo; private Compressor compressor; // !null only if using native // Hadoop compression private final RateLimitedOutputStream fsOut; private final OutputStream cipherOut; private final long posStart; private final SimpleBufferedOutputStream fsBufferedOutput; private OutputStream out; public WBlockState(CompressionAlgorithm compressionAlgo, RateLimitedOutputStream fsOut, BytesWritable fsOutputBuffer, Configuration conf, FileEncrypter encrypter) throws IOException { this.compressAlgo = compressionAlgo; this.fsOut = fsOut; this.posStart = fsOut.position(); fsOutputBuffer.setCapacity(getFSOutputBufferSize(conf)); this.fsBufferedOutput = new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes()); this.compressor = compressAlgo.getCompressor(); try { this.cipherOut = encrypter.encryptStream(fsBufferedOutput); this.out = compressionAlgo.createCompressionStream(cipherOut, compressor, 0); } catch (IOException e) { compressAlgo.returnCompressor(compressor); throw e; } } /** * Get the output stream for BlockAppender's consumption. * * @return the output stream suitable for writing block data. 
*/ OutputStream getOutputStream() { return out; } /** * Get the current position in file. * * @return The current byte offset in underlying file. */ long getCurrentPos() { return fsOut.position() + fsBufferedOutput.size(); } long getStartPos() { return posStart; } /** * Current size of compressed data. */ long getCompressedSize() { return getCurrentPos() - posStart; } /** * Finishing up the current block. */ public void finish() throws IOException { try { if (out != null) { out.flush(); // If the cipherOut stream is different from the fsBufferedOutput stream, then we likely // have // an actual encrypted output stream that needs to be closed in order for it // to flush the final bytes to the output stream. We should have set the flag to // make sure that this close does *not* close the underlying stream, so calling // close here should do the write thing. if (fsBufferedOutput != cipherOut) { // Close the cipherOutputStream cipherOut.close(); } out = null; } } finally { compressAlgo.returnCompressor(compressor); compressor = null; } } } /** * Access point to stuff data into a block. * */ public class BlockAppender extends DataOutputStream { private final MetaBlockRegister metaBlockRegister; private final WBlockState wBlkState; private boolean closed = false; /** * Constructor * * @param metaBlockRegister the block register, which is called when the block is closed. * @param wbs The writable compression block state. */ BlockAppender(MetaBlockRegister metaBlockRegister, WBlockState wbs) { super(wbs.getOutputStream()); this.metaBlockRegister = metaBlockRegister; this.wBlkState = wbs; } BlockAppender(WBlockState wbs) { super(wbs.getOutputStream()); this.metaBlockRegister = null; this.wBlkState = wbs; } /** * Get the raw size of the block. * * Caution: size() comes from DataOutputStream which returns Integer.MAX_VALUE on an overflow. * This results in a value of 2GiB meaning that an unknown amount of data, at least 2GiB * large, has been written. 
RFiles handle this issue by keeping track of the position of * blocks instead of relying on blocks to provide this information. * * @return the number of uncompressed bytes written through the BlockAppender so far. */ public long getRawSize() { return size() & 0x00000000ffffffffL; } /** * Get the compressed size of the block in progress. * * @return the number of compressed bytes written to the underlying FS file. The size may be * smaller than actual need to compress the all data written due to internal buffering * inside the compressor. */ public long getCompressedSize() throws IOException { return wBlkState.getCompressedSize(); } public long getStartPos() { return wBlkState.getStartPos(); } @Override public void flush() { // The down stream is a special kind of stream that finishes a // compression block upon flush. So we disable flush() here. } /** * Signaling the end of write to the block. The block register will be called for registering * the finished block. */ @Override public void close() throws IOException { if (closed) { return; } try { ++errorCount; wBlkState.finish(); if (metaBlockRegister != null) { metaBlockRegister.register(getRawSize(), wBlkState.getStartPos(), wBlkState.getCurrentPos()); } --errorCount; } finally { closed = true; blkInProgress = false; } } } /** * Constructor * * @param fout FS output stream. * @param compressionName Name of the compression algorithm, which will be used for all data * blocks. 
* @see Compression#getSupportedAlgorithms */ public Writer(FSDataOutputStream fout, RateLimiter writeLimiter, String compressionName, Configuration conf, CryptoService cryptoService) throws IOException { if (fout.getPos() != 0) { throw new IOException("Output file not at zero offset."); } this.out = new RateLimitedOutputStream(fout, writeLimiter); this.conf = conf; dataIndex = new DataIndex(compressionName); metaIndex = new MetaIndex(); fsOutputBuffer = new BytesWritable(); Magic.write(this.out); this.cryptoEnvironment = new CryptoEnvironmentImpl(Scope.TABLE, null, null); this.encrypter = cryptoService.getFileEncrypter(this.cryptoEnvironment); } /** * Close the BCFile Writer. Attempting to use the Writer after calling <code>close</code> is not * allowed and may lead to undetermined results. */ @Override public void close() throws IOException { if (closed) { return; } try { if (errorCount == 0) { if (blkInProgress) { throw new IllegalStateException("Close() called with active block appender."); } // add metaBCFileIndex to metaIndex as the last meta block try (BlockAppender appender = prepareMetaBlock(DataIndex.BLOCK_NAME, getDefaultCompressionAlgorithm())) { dataIndex.write(appender); } long offsetIndexMeta = out.position(); metaIndex.write(out); long offsetCryptoParameter = out.position(); byte[] cryptoParams = this.encrypter.getDecryptionParameters(); out.writeInt(cryptoParams.length); out.write(cryptoParams); out.writeLong(offsetIndexMeta); out.writeLong(offsetCryptoParameter); API_VERSION_3.write(out); Magic.write(out); out.flush(); length = out.position(); out.close(); } } finally { closed = true; } } private CompressionAlgorithm getDefaultCompressionAlgorithm() { return dataIndex.getDefaultCompressionAlgorithm(); } private BlockAppender prepareMetaBlock(String name, CompressionAlgorithm compressAlgo) throws IOException, MetaBlockAlreadyExists { if (blkInProgress) { throw new IllegalStateException("Cannot create Meta Block until previous block is closed."); } 
if (metaIndex.getMetaByName(name) != null) { throw new MetaBlockAlreadyExists("name=" + name); } MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo); WBlockState wbs = new WBlockState(compressAlgo, out, fsOutputBuffer, conf, encrypter); BlockAppender ba = new BlockAppender(mbr, wbs); blkInProgress = true; metaBlkSeen = true; return ba; } /** * Create a Meta Block and obtain an output stream for adding data into the block. The Meta * Block will be compressed with the same compression algorithm as data blocks. There can only * be one BlockAppender stream active at any time. Regular Blocks may not be created after the * first Meta Blocks. The caller must call BlockAppender.close() to conclude the block creation. * * @param name The name of the Meta Block. The name must not conflict with existing Meta Blocks. * @return The BlockAppender stream * @throws MetaBlockAlreadyExists If the meta block with the name already exists. */ public BlockAppender prepareMetaBlock(String name) throws IOException, MetaBlockAlreadyExists { return prepareMetaBlock(name, getDefaultCompressionAlgorithm()); } /** * Create a Data Block and obtain an output stream for adding data into the block. There can * only be one BlockAppender stream active at any time. Data Blocks may not be created after the * first Meta Blocks. The caller must call BlockAppender.close() to conclude the block creation. 
* * @return The BlockAppender stream */ public BlockAppender prepareDataBlock() throws IOException { if (blkInProgress) { throw new IllegalStateException("Cannot create Data Block until previous block is closed."); } if (metaBlkSeen) { throw new IllegalStateException("Cannot create Data Block after Meta Blocks."); } WBlockState wbs = new WBlockState(getDefaultCompressionAlgorithm(), out, fsOutputBuffer, conf, encrypter); BlockAppender ba = new BlockAppender(wbs); blkInProgress = true; return ba; } /** * Callback to make sure a meta block is added to the internal list when its stream is closed. */ private class MetaBlockRegister { private final String name; private final CompressionAlgorithm compressAlgo; MetaBlockRegister(String name, CompressionAlgorithm compressAlgo) { this.name = name; this.compressAlgo = compressAlgo; } public void register(long raw, long begin, long end) { metaIndex.addEntry( new MetaIndexEntry(name, compressAlgo, new BlockRegion(begin, end - begin, raw))); } } } /** * BCFile Reader, interface to read the file's data and meta blocks. */ public static class Reader implements Closeable { private final SeekableDataInputStream in; private final Configuration conf; final DataIndex dataIndex; // Index for meta blocks final MetaIndex metaIndex; final Version version; private byte[] decryptionParams; private FileDecrypter decrypter; /** * Intermediate class that maintain the state of a Readable Compression Block. 
*/ private static final class RBlockState { private final CompressionAlgorithm compressAlgo; private Decompressor decompressor; private final BlockRegion region; private final InputStream in; private volatile boolean closed; public <InputStreamType extends InputStream & Seekable> RBlockState( CompressionAlgorithm compressionAlgo, InputStreamType fsin, BlockRegion region, Configuration conf, FileDecrypter decrypter) throws IOException { this.compressAlgo = compressionAlgo; this.region = region; this.decompressor = compressionAlgo.getDecompressor(); BoundedRangeFileInputStream boundedRangeFileInputStream = new BoundedRangeFileInputStream( fsin, this.region.getOffset(), this.region.getCompressedSize()); try { InputStream inputStreamToBeCompressed = decrypter.decryptStream(boundedRangeFileInputStream); this.in = compressAlgo.createDecompressionStream(inputStreamToBeCompressed, decompressor, getFSInputBufferSize(conf)); } catch (IOException e) { compressAlgo.returnDecompressor(decompressor); throw e; } closed = false; } /** * Get the output stream for BlockAppender's consumption. * * @return the output stream suitable for writing block data. */ public InputStream getInputStream() { return in; } public BlockRegion getBlockRegion() { return region; } public void finish() throws IOException { synchronized (in) { if (!closed) { try { in.close(); } finally { closed = true; if (decompressor != null) { try { compressAlgo.returnDecompressor(decompressor); } finally { decompressor = null; } } } } } } } /** * Access point to read a block. */ public static class BlockReader extends DataInputStream { private final RBlockState rBlkState; private boolean closed = false; BlockReader(RBlockState rbs) { super(rbs.getInputStream()); rBlkState = rbs; } /** * Finishing reading the block. Release all resources. */ @Override public void close() throws IOException { if (closed) { return; } try { // Do not set rBlkState to null. People may access stats after calling // close(). 
rBlkState.finish(); } finally { closed = true; } } /** * Get the uncompressed size of the block. * * @return uncompressed size of the block. */ public long getRawSize() { return rBlkState.getBlockRegion().getRawSize(); } } public byte[] serializeMetadata(int maxSize) { try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos); metaIndex.write(out); if (out.size() > maxSize) { return null; } dataIndex.write(out); if (out.size() > maxSize) { return null; } CryptoUtils.writeParams(this.decryptionParams, out); if (out.size() > maxSize) { return null; } out.close(); return baos.toByteArray(); } catch (IOException e) { throw new UncheckedIOException(e); } } public <InputStreamType extends InputStream & Seekable> Reader(InputStreamType fin, long fileLength, Configuration conf, CryptoService cryptoService) throws IOException { this.in = new SeekableDataInputStream(fin); this.conf = conf; // Move the cursor to grab the version and the magic first this.in.seek(fileLength - Magic.size() - Version.size()); version = new Version(this.in); Magic.readAndVerify(this.in); // Do a version check - API_VERSION_2 used experimental crypto parameters, no longer supported if (!version.compatibleWith(BCFile.API_VERSION_3) && !version.compatibleWith(BCFile.API_VERSION_1)) { throw new IOException("Unsupported BCFile Version found: " + version + ". 
" + "Only support " + API_VERSION_1 + " or " + API_VERSION_3); } // Read the right number offsets based on version long offsetIndexMeta = 0; long offsetCryptoParameters = 0; if (version.equals(API_VERSION_1)) { this.in.seek(fileLength - Magic.size() - Version.size() - Long.BYTES); offsetIndexMeta = this.in.readLong(); } else { this.in.seek(fileLength - Magic.size() - Version.size() - 16); // 2 * Long.BYTES = 16 offsetIndexMeta = this.in.readLong(); offsetCryptoParameters = this.in.readLong(); } // read meta index this.in.seek(offsetIndexMeta); metaIndex = new MetaIndex(this.in); CryptoEnvironment cryptoEnvironment = null; // backwards compatibility if (version.equals(API_VERSION_1)) { LOG.trace("Found a version 1 file to read."); decryptionParams = new NoFileEncrypter().getDecryptionParameters(); this.decrypter = new NoFileDecrypter(); } else { // read crypto parameters and get decrypter this.in.seek(offsetCryptoParameters); decryptionParams = CryptoUtils.readParams(this.in); cryptoEnvironment = new CryptoEnvironmentImpl(Scope.TABLE, null, decryptionParams); this.decrypter = cryptoService.getFileDecrypter(cryptoEnvironment); } // read data:BCFile.index, the data block index try (BlockReader blockR = getMetaBlock(DataIndex.BLOCK_NAME)) { dataIndex = new DataIndex(blockR); } } public <InputStreamType extends InputStream & Seekable> Reader(byte[] serializedMetadata, InputStreamType fin, Configuration conf, CryptoService cryptoService) throws IOException { this.in = new SeekableDataInputStream(fin); this.conf = conf; ByteArrayInputStream bais = new ByteArrayInputStream(serializedMetadata); DataInputStream dis = new DataInputStream(bais); version = null; metaIndex = new MetaIndex(dis); dataIndex = new DataIndex(dis); decryptionParams = CryptoUtils.readParams(dis); CryptoEnvironmentImpl env = new CryptoEnvironmentImpl(Scope.TABLE, null, decryptionParams); this.decrypter = cryptoService.getFileDecrypter(env); } /** * Finishing reading the BCFile. Release all resources. 
*/ @Override public void close() { // nothing to be done now } /** * Get the number of data blocks. * * @return the number of data blocks. */ public int getBlockCount() { return dataIndex.getBlockRegionList().size(); } /** * Stream access to a Meta Block. * * @param name meta block name * @return BlockReader input stream for reading the meta block. * @throws MetaBlockDoesNotExist The Meta Block with the given name does not exist. */ public BlockReader getMetaBlock(String name) throws IOException, MetaBlockDoesNotExist { MetaIndexEntry imeBCIndex = metaIndex.getMetaByName(name); if (imeBCIndex == null) { throw new MetaBlockDoesNotExist("name=" + name); } BlockRegion region = imeBCIndex.getRegion(); return createReader(imeBCIndex.getCompressionAlgorithm(), region); } public long getMetaBlockRawSize(String name) throws IOException, MetaBlockDoesNotExist { MetaIndexEntry imeBCIndex = metaIndex.getMetaByName(name); if (imeBCIndex == null) { throw new MetaBlockDoesNotExist("name=" + name); } return imeBCIndex.getRegion().getRawSize(); } /** * Stream access to a Data Block. * * @param blockIndex 0-based data block index. * @return BlockReader input stream for reading the data block. 
*/ public BlockReader getDataBlock(int blockIndex) throws IOException { if (blockIndex < 0 || blockIndex >= getBlockCount()) { throw new IndexOutOfBoundsException( String.format("blockIndex=%d, numBlocks=%d", blockIndex, getBlockCount())); } BlockRegion region = dataIndex.getBlockRegionList().get(blockIndex); return createReader(dataIndex.getDefaultCompressionAlgorithm(), region); } public BlockReader getDataBlock(long offset, long compressedSize, long rawSize) throws IOException { BlockRegion region = new BlockRegion(offset, compressedSize, rawSize); return createReader(dataIndex.getDefaultCompressionAlgorithm(), region); } public long getDataBlockRawSize(int blockIndex) { if (blockIndex < 0 || blockIndex >= getBlockCount()) { throw new IndexOutOfBoundsException( String.format("blockIndex=%d, numBlocks=%d", blockIndex, getBlockCount())); } return dataIndex.getBlockRegionList().get(blockIndex).getRawSize(); } private BlockReader createReader(CompressionAlgorithm compressAlgo, BlockRegion region) throws IOException { RBlockState rbs = new RBlockState(compressAlgo, in, region, conf, decrypter); return new BlockReader(rbs); } } /** * Index for all Meta blocks. 
*/ static class MetaIndex { // use a tree map, for getting a meta block entry by name final Map<String,MetaIndexEntry> index; // for write public MetaIndex() { index = new TreeMap<>(); } // for read, construct the map from the file public MetaIndex(DataInput in) throws IOException { int count = Utils.readVInt(in); index = new TreeMap<>(); for (int nx = 0; nx < count; nx++) { MetaIndexEntry indexEntry = new MetaIndexEntry(in); index.put(indexEntry.getMetaName(), indexEntry); } } public void addEntry(MetaIndexEntry indexEntry) { index.put(indexEntry.getMetaName(), indexEntry); } public MetaIndexEntry getMetaByName(String name) { return index.get(name); } public void write(DataOutput out) throws IOException { Utils.writeVInt(out, index.size()); for (MetaIndexEntry indexEntry : index.values()) { indexEntry.write(out); } } } /** * An entry describes a meta block in the MetaIndex. */ static final class MetaIndexEntry { private final String metaName; private final CompressionAlgorithm compressionAlgorithm; private static final String defaultPrefix = "data:"; private final BlockRegion region; public MetaIndexEntry(DataInput in) throws IOException { String fullMetaName = Utils.readString(in); if (fullMetaName.startsWith(defaultPrefix)) { metaName = fullMetaName.substring(defaultPrefix.length(), fullMetaName.length()); } else { throw new IOException("Corrupted Meta region Index"); } compressionAlgorithm = Compression.getCompressionAlgorithmByName(Utils.readString(in)); region = new BlockRegion(in); } public MetaIndexEntry(String metaName, CompressionAlgorithm compressionAlgorithm, BlockRegion region) { this.metaName = metaName; this.compressionAlgorithm = compressionAlgorithm; this.region = region; } public String getMetaName() { return metaName; } public CompressionAlgorithm getCompressionAlgorithm() { return compressionAlgorithm; } public BlockRegion getRegion() { return region; } public void write(DataOutput out) throws IOException { Utils.writeString(out, defaultPrefix + 
metaName); Utils.writeString(out, compressionAlgorithm.getName()); region.write(out); } } /** * Index of all compressed data blocks. */ static class DataIndex { static final String BLOCK_NAME = "BCFile.index"; private final CompressionAlgorithm defaultCompressionAlgorithm; // for data blocks, each entry specifies a block's offset, compressed size // and raw size private final ArrayList<BlockRegion> listRegions; // for read, deserialized from a file public DataIndex(DataInput in) throws IOException { defaultCompressionAlgorithm = Compression.getCompressionAlgorithmByName(Utils.readString(in)); int n = Utils.readVInt(in); listRegions = new ArrayList<>(n); for (int i = 0; i < n; i++) { BlockRegion region = new BlockRegion(in); listRegions.add(region); } } // for write public DataIndex(String defaultCompressionAlgorithmName) { this.defaultCompressionAlgorithm = Compression.getCompressionAlgorithmByName(defaultCompressionAlgorithmName); listRegions = new ArrayList<>(); } public CompressionAlgorithm getDefaultCompressionAlgorithm() { return defaultCompressionAlgorithm; } public ArrayList<BlockRegion> getBlockRegionList() { return listRegions; } public void write(DataOutput out) throws IOException { Utils.writeString(out, defaultCompressionAlgorithm.getName()); Utils.writeVInt(out, listRegions.size()); for (BlockRegion region : listRegions) { region.write(out); } } } /** * Magic number uniquely identifying a BCFile in the header/footer. */ static final class Magic { private static final byte[] AB_MAGIC_BCFILE = { // ... 
total of 16 bytes (byte) 0xd1, (byte) 0x11, (byte) 0xd3, (byte) 0x68, (byte) 0x91, (byte) 0xb5, (byte) 0xd7, (byte) 0xb6, (byte) 0x39, (byte) 0xdf, (byte) 0x41, (byte) 0x40, (byte) 0x92, (byte) 0xba, (byte) 0xe1, (byte) 0x50}; public static void readAndVerify(DataInput in) throws IOException { byte[] abMagic = new byte[size()]; in.readFully(abMagic); // check against AB_MAGIC_BCFILE, if not matching, throw an // Exception if (!Arrays.equals(abMagic, AB_MAGIC_BCFILE)) { throw new IOException("Not a valid BCFile."); } } public static void write(DataOutput out) throws IOException { out.write(AB_MAGIC_BCFILE); } public static int size() { return AB_MAGIC_BCFILE.length; } } /** * Block region. */ static final class BlockRegion { private final long offset; private final long compressedSize; private final long rawSize; public BlockRegion(DataInput in) throws IOException { offset = Utils.readVLong(in); compressedSize = Utils.readVLong(in); rawSize = Utils.readVLong(in); } public BlockRegion(long offset, long compressedSize, long rawSize) { this.offset = offset; this.compressedSize = compressedSize; this.rawSize = rawSize; } public void write(DataOutput out) throws IOException { Utils.writeVLong(out, offset); Utils.writeVLong(out, compressedSize); Utils.writeVLong(out, rawSize); } public long getOffset() { return offset; } public long getCompressedSize() { return compressedSize; } public long getRawSize() { return rawSize; } } }
9,672
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/IdentityCodec.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.rfile.bcfile;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;

/**
 * A Hadoop {@link CompressionCodec} that performs no compression at all: every compressor,
 * decompressor, and stream produced by this codec passes bytes through unchanged.
 */
public class IdentityCodec implements CompressionCodec {

  /*
   * Copied from org/apache/hadoop/io/compress/FakeCompressor.java
   */
  /**
   * A {@link Compressor} whose "compression" is a straight copy from the caller-supplied input
   * buffer to the caller-supplied output buffer, while tracking byte counts.
   */
  public static class IdentityCompressor implements Compressor {

    private boolean finish; // set once finish() is called; no more input expected
    private boolean finished; // set once finish was requested and all input is drained
    private int nread; // total bytes handed to setInput()
    private int nwrite; // total bytes copied out via compress()

    private byte[] userBuf; // current input buffer (from setInput)
    private int userBufOff; // next unread offset within userBuf
    private int userBufLen; // bytes remaining in userBuf

    @Override
    public int compress(byte[] b, int off, int len) throws IOException {
      // Copy at most len pending input bytes directly into the output buffer.
      int n = Math.min(len, userBufLen);
      if (userBuf != null && b != null) {
        System.arraycopy(userBuf, userBufOff, b, off, n);
      }
      userBufOff += n;
      userBufLen -= n;
      nwrite += n;

      // Once finish() was requested and the input is fully drained, report completion.
      if (finish && userBufLen <= 0) {
        finished = true;
      }

      return n;
    }

    @Override
    public void end() {
      // nop
    }

    @Override
    public void finish() {
      finish = true;
    }

    @Override
    public boolean finished() {
      return finished;
    }

    @Override
    public long getBytesRead() {
      return nread;
    }

    @Override
    public long getBytesWritten() {
      return nwrite;
    }

    @Override
    public boolean needsInput() {
      // More input is needed only when the current buffer is exhausted.
      return userBufLen <= 0;
    }

    @Override
    public void reset() {
      // Return to the freshly-constructed state.
      finish = false;
      finished = false;
      nread = 0;
      nwrite = 0;
      userBuf = null;
      userBufOff = 0;
      userBufLen = 0;
    }

    @Override
    public void setDictionary(byte[] b, int off, int len) {
      // nop
    }

    @Override
    public void setInput(byte[] b, int off, int len) {
      // The buffer is referenced, not copied; the caller must not reuse it until drained.
      nread += len;
      userBuf = b;
      userBufOff = off;
      userBufLen = len;
    }

    @Override
    public void reinit(Configuration conf) {
      // nop
    }
  }

  /*
   * Copied from org/apache/hadoop/io/compress/FakeDecompressor.java
   */
  /**
   * A {@link Decompressor} mirroring {@link IdentityCompressor}: a straight copy from the
   * caller-supplied input buffer to the caller-supplied output buffer.
   */
  public static class IdentityDecompressor implements Decompressor {

    private boolean finish; // NOTE(review): never set anywhere in this class
    private boolean finished; // reported by finished()
    private int nread; // total bytes handed to setInput()
    private int nwrite; // total bytes copied out via decompress()

    private byte[] userBuf; // current input buffer (from setInput)
    private int userBufOff; // next unread offset within userBuf
    private int userBufLen; // bytes remaining in userBuf

    @Override
    public int decompress(byte[] b, int off, int len) throws IOException {
      // Copy at most len pending input bytes directly into the output buffer.
      int n = Math.min(len, userBufLen);
      if (userBuf != null && b != null) {
        System.arraycopy(userBuf, userBufOff, b, off, n);
      }
      userBufOff += n;
      userBufLen -= n;
      nwrite += n;

      if (finish && userBufLen <= 0) {
        finished = true;
      }

      return n;
    }

    @Override
    public void end() {
      // nop
    }

    @Override
    public boolean finished() {
      return finished;
    }

    // Not part of the Decompressor interface; exposed for symmetry with Compressor.
    public long getBytesRead() {
      return nread;
    }

    // Not part of the Decompressor interface; exposed for symmetry with Compressor.
    public long getBytesWritten() {
      return nwrite;
    }

    @Override
    public boolean needsDictionary() {
      return false;
    }

    @Override
    public boolean needsInput() {
      return userBufLen <= 0;
    }

    @Override
    public void reset() {
      // Return to the freshly-constructed state.
      finish = false;
      finished = false;
      nread = 0;
      nwrite = 0;
      userBuf = null;
      userBufOff = 0;
      userBufLen = 0;
    }

    @Override
    public void setDictionary(byte[] b, int off, int len) {
      // nop
    }

    @Override
    public void setInput(byte[] b, int off, int len) {
      nread += len;
      userBuf = b;
      userBufOff = off;
      userBufLen = len;
    }

    @Override
    public int getRemaining() {
      return 0;
    }
  }

  /**
   * A {@link CompressionInputStream} that simply delegates every read to the wrapped stream.
   */
  public static class IdentityCompressionInputStream extends CompressionInputStream {

    protected IdentityCompressionInputStream(InputStream in) throws IOException {
      super(in);
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
      return in.read(b, off, len);
    }

    @Override
    public void resetState() throws IOException {}

    @Override
    public int read() throws IOException {
      return in.read();
    }
  }

  /**
   * A {@link CompressionOutputStream} that simply delegates every write to the wrapped stream.
   */
  public static class IdentityCompressionOutputStream extends CompressionOutputStream {

    public IdentityCompressionOutputStream(OutputStream out) {
      super(out);
    }

    @Override
    public void finish() throws IOException {}

    @Override
    public void resetState() throws IOException {}

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
      out.write(b, off, len);
    }

    @Override
    public void write(int b) throws IOException {
      out.write(b);
    }
  }

  @Override
  public Compressor createCompressor() {
    return new IdentityCompressor();
  }

  @Override
  public Decompressor createDecompressor() {
    return new IdentityDecompressor();
  }

  @Override
  public CompressionInputStream createInputStream(InputStream in) throws IOException {
    return new IdentityCompressionInputStream(in);
  }

  // The supplied Decompressor is intentionally ignored; identity streams need no state.
  @Override
  public CompressionInputStream createInputStream(InputStream in, Decompressor d)
      throws IOException {
    return new IdentityCompressionInputStream(in);
  }

  @Override
  public CompressionOutputStream createOutputStream(OutputStream os) throws IOException {
    return new IdentityCompressionOutputStream(os);
  }

  // The supplied Compressor is intentionally ignored; identity streams need no state.
  @Override
  public CompressionOutputStream createOutputStream(OutputStream os, Compressor c)
      throws IOException {
    return new IdentityCompressionOutputStream(os);
  }

  @Override
  public Class<? extends Compressor> getCompressorType() {
    return IdentityCompressor.class;
  }

  @Override
  public Class<? extends Decompressor> getDecompressorType() {
    return IdentityDecompressor.class;
  }

  @Override
  public String getDefaultExtension() {
    return ".identity";
  }
}
9,673
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/PrintBCInfo.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.rfile.bcfile; import java.io.IOException; import java.io.PrintStream; import java.util.Map.Entry; import java.util.Set; import org.apache.accumulo.core.cli.ConfigOpts; import org.apache.accumulo.core.conf.SiteConfiguration; import org.apache.accumulo.core.file.rfile.bcfile.BCFile.MetaIndexEntry; import org.apache.accumulo.core.spi.crypto.CryptoService; import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import com.beust.jcommander.Parameter; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; @SuppressFBWarnings(value = "DM_EXIT", justification = "System.exit is fine here because it's a utility class executed by a main()") public class PrintBCInfo { SiteConfiguration siteConfig; Configuration conf; FileSystem fs; Path path; CryptoService cryptoService = NoCryptoServiceFactory.NONE; public void printMetaBlockInfo() throws IOException { FSDataInputStream fsin = fs.open(path); try (BCFile.Reader bcfr = new BCFile.Reader(fsin, fs.getFileStatus(path).getLen(), conf, 
cryptoService)) { Set<Entry<String,MetaIndexEntry>> es = bcfr.metaIndex.index.entrySet(); for (Entry<String,MetaIndexEntry> entry : es) { PrintStream out = System.out; out.println("Meta block : " + entry.getKey()); out.println(" Raw size : " + String.format("%,d", entry.getValue().getRegion().getRawSize()) + " bytes"); out.println(" Compressed size : " + String.format("%,d", entry.getValue().getRegion().getCompressedSize()) + " bytes"); out.println( " Compression type : " + entry.getValue().getCompressionAlgorithm().getName()); out.println(); } } } static class Opts extends ConfigOpts { @Parameter(description = " <file>") String file; } public PrintBCInfo(String[] args) throws Exception { Opts opts = new Opts(); opts.parseArgs("PrintInfo", args); if (opts.file.isEmpty()) { System.err.println("No files were given"); System.exit(-1); } siteConfig = opts.getSiteConfiguration(); conf = new Configuration(); FileSystem hadoopFs = FileSystem.get(conf); FileSystem localFs = FileSystem.getLocal(conf); path = new Path(opts.file); if (opts.file.contains(":")) { fs = path.getFileSystem(conf); } else { fs = hadoopFs.exists(path) ? hadoopFs : localFs; // fall back to local } } public CryptoService getCryptoService() { return cryptoService; } public void setCryptoService(CryptoService cryptoService) { this.cryptoService = cryptoService; } }
9,674
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/ScanCacheProvider.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.impl; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.spi.cache.BlockCache; import org.apache.accumulo.core.spi.scan.ScanDispatch; public class ScanCacheProvider implements CacheProvider { private final BlockCache indexCache; private final BlockCache dataCache; public ScanCacheProvider(AccumuloConfiguration tableConfig, ScanDispatch dispatch, BlockCache indexCache, BlockCache dataCache) { switch (dispatch.getIndexCacheUsage()) { case ENABLED: this.indexCache = indexCache; break; case DISABLED: this.indexCache = null; break; case OPPORTUNISTIC: this.indexCache = new OpportunisticBlockCache(indexCache); break; case TABLE: this.indexCache = tableConfig.getBoolean(Property.TABLE_INDEXCACHE_ENABLED) ? 
indexCache : null; break; default: throw new IllegalStateException(); } switch (dispatch.getDataCacheUsage()) { case ENABLED: this.dataCache = dataCache; break; case DISABLED: this.dataCache = null; break; case OPPORTUNISTIC: this.dataCache = new OpportunisticBlockCache(dataCache); break; case TABLE: this.dataCache = tableConfig.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED) ? dataCache : null; break; default: throw new IllegalStateException(); } } @Override public BlockCache getDataCache() { return dataCache; } @Override public BlockCache getIndexCache() { return indexCache; } }
9,675
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CacheProvider.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.impl; import org.apache.accumulo.core.spi.cache.BlockCache; public interface CacheProvider { static final CacheProvider NULL_PROVIDER = new BasicCacheProvider(null, null); BlockCache getDataCache(); BlockCache getIndexCache(); }
9,676
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/OpportunisticBlockCache.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.impl; import org.apache.accumulo.core.spi.cache.BlockCache; import org.apache.accumulo.core.spi.cache.CacheEntry; public class OpportunisticBlockCache implements BlockCache { private BlockCache cache; public OpportunisticBlockCache(BlockCache cache) { this.cache = cache; } @Override public CacheEntry cacheBlock(String blockName, byte[] buf) { return null; } @Override public CacheEntry getBlock(String blockName) { return cache.getBlock(blockName); } @Override public CacheEntry getBlock(String blockName, Loader loader) { return cache.getBlock(blockName); } @Override public long getMaxHeapSize() { return cache.getMaxHeapSize(); } @Override public long getMaxSize() { return cache.getMaxSize(); } @Override public Stats getStats() { return cache.getStats(); } }
9,677
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/BasicCacheProvider.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.impl; import org.apache.accumulo.core.spi.cache.BlockCache; public class BasicCacheProvider implements CacheProvider { private final BlockCache indexCache; private final BlockCache dataCache; public BasicCacheProvider(BlockCache indexCache, BlockCache dataCache) { this.indexCache = indexCache; this.dataCache = dataCache; } @Override public BlockCache getDataCache() { return dataCache; } @Override public BlockCache getIndexCache() { return indexCache; } }
9,678
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/SeekableByteArrayInputStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.impl; import static java.util.Objects.requireNonNull; import java.io.IOException; import java.io.InputStream; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.IntBinaryOperator; /** * This class is like byte array input stream with two differences. It supports seeking and avoids * synchronization. */ public class SeekableByteArrayInputStream extends InputStream { private final byte[] buffer; private final AtomicInteger cur = new AtomicInteger(0); private final int max; @Override public int read() { // advance the pointer by 1 if we haven't reached the end final int currentValue = cur.getAndAccumulate(1, (v, x) -> v < max ? 
v + x : v); if (currentValue < max) { return buffer[currentValue] & 0xff; } else { return -1; } } @Override public int read(byte[] b, int offset, int length) { if (b == null) { throw new NullPointerException(); } if (length < 0 || offset < 0 || length > b.length - offset) { throw new IndexOutOfBoundsException(); } if (length == 0) { return 0; } // compute how much to read, based on what's left available IntBinaryOperator add = (cur1, length1) -> { final int available = max - cur1; if (available <= 0) { return cur1; } else if (length1 > available) { length1 = available; } return cur1 + length1; }; final int currentValue = cur.getAndAccumulate(length, add); final int avail = max - currentValue; if (avail <= 0) { return -1; } if (length > avail) { length = avail; } System.arraycopy(buffer, currentValue, b, offset, length); return length; } @Override public long skip(long requestedSkip) { // actual skip is at least 0, but no more than what's available BiFunction<Integer,Integer,Integer> skipValue = (current, skip) -> Math.max(0, Math.min(max - current, skip)); // compute how much to advance, based on actual amount skipped IntBinaryOperator add = (cur1, skip) -> cur1 + skipValue.apply(cur1, skip); // advance the pointer and return the actual amount skipped int currentValue = cur.getAndAccumulate((int) requestedSkip, add); return skipValue.apply(currentValue, (int) requestedSkip); } @Override public int available() { return max - cur.get(); } @Override public boolean markSupported() { return false; } @Override public synchronized void mark(int readAheadLimit) { throw new UnsupportedOperationException(); } @Override public synchronized void reset() { throw new UnsupportedOperationException(); } @Override public void close() throws IOException {} public SeekableByteArrayInputStream(byte[] buf) { requireNonNull(buf, "bug argument was null"); this.buffer = buf; this.max = buf.length; } public SeekableByteArrayInputStream(byte[] buf, int maxOffset) { requireNonNull(buf, "bug 
argument was null"); this.buffer = buf; this.max = maxOffset; } public void seek(int position) { if (position < 0 || position >= max) { throw new IllegalArgumentException("position = " + position + " maxOffset = " + max); } this.cur.set(position); } public int getPosition() { return this.cur.get(); } byte[] getBuffer() { return buffer; } }
9,679
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.impl;

import java.io.Closeable;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

import org.apache.accumulo.core.file.rfile.BlockIndex;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile.Reader.BlockReader;
import org.apache.accumulo.core.file.rfile.bcfile.MetaBlockDoesNotExist;
import org.apache.accumulo.core.file.streams.RateLimitedInputStream;
import org.apache.accumulo.core.spi.cache.BlockCache;
import org.apache.accumulo.core.spi.cache.BlockCache.Loader;
import org.apache.accumulo.core.spi.cache.CacheEntry;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Seekable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.github.benmanes.caffeine.cache.Cache;

/**
 * This is a wrapper class for BCFile that includes a cache for independent caches for datablocks
 * and metadatablocks
 */
public class CachableBlockFile {

  // static-only holder class; never instantiated
  private CachableBlockFile() {}

  private static final Logger log = LoggerFactory.getLogger(CachableBlockFile.class);

  // like Supplier, but allowed to throw IOException
  private interface IoeSupplier<T> {
    T get() throws IOException;
  }

  /**
   * Derives the cache key prefix used for all blocks of the file at the given path.
   */
  public static String pathToCacheId(Path p) {
    return p.toString();
  }

  /**
   * Builder collecting everything a {@link Reader} needs: where the bytes come from, how long the
   * file is, which caches to consult, and how to decrypt/rate-limit reads.
   */
  public static class CachableBuilder {
    String cacheId = null;
    IoeSupplier<InputStream> inputSupplier = null;
    IoeSupplier<Long> lengthSupplier = null;
    Cache<String,Long> fileLenCache = null;
    volatile CacheProvider cacheProvider = CacheProvider.NULL_PROVIDER;
    RateLimiter readLimiter = null;
    Configuration hadoopConf = null;
    CryptoService cryptoService = null;

    public CachableBuilder conf(Configuration hadoopConf) {
      this.hadoopConf = hadoopConf;
      return this;
    }

    public CachableBuilder fsPath(FileSystem fs, Path dataFile) {
      return fsPath(fs, dataFile, false);
    }

    /**
     * Sources the reader from a file in the given filesystem; the file's path becomes the cache
     * id and its FileStatus length becomes the length supplier.
     */
    public CachableBuilder fsPath(FileSystem fs, Path dataFile, boolean dropCacheBehind) {
      this.cacheId = pathToCacheId(dataFile);
      this.inputSupplier = () -> {
        FSDataInputStream is = fs.open(dataFile);
        if (dropCacheBehind) {
          // Tell the DataNode that the write ahead log does not need to be cached in the OS page
          // cache
          try {
            is.setDropBehind(Boolean.TRUE);
            log.trace("Called setDropBehind(TRUE) for stream reading file {}", dataFile);
          } catch (UnsupportedOperationException e) {
            log.debug("setDropBehind not enabled for wal file: {}", dataFile);
          } catch (IOException e) {
            log.debug("IOException setting drop behind for file: {}, msg: {}", dataFile,
                e.getMessage());
          }
        }
        return is;
      };
      this.lengthSupplier = () -> fs.getFileStatus(dataFile).getLen();
      return this;
    }

    /**
     * Sources the reader from an already-open stream; the caller supplies the cache id.
     */
    public CachableBuilder input(InputStream is, String cacheId) {
      this.cacheId = cacheId;
      this.inputSupplier = () -> is;
      return this;
    }

    public CachableBuilder length(long len) {
      this.lengthSupplier = () -> len;
      return this;
    }

    public CachableBuilder fileLen(Cache<String,Long> cache) {
      this.fileLenCache = cache;
      return this;
    }

    public CachableBuilder cacheProvider(CacheProvider cacheProvider) {
      this.cacheProvider = cacheProvider;
      return this;
    }

    public CachableBuilder readLimiter(RateLimiter readLimiter) {
      this.readLimiter = readLimiter;
      return this;
    }

    public CachableBuilder cryptoService(CryptoService cryptoService) {
      this.cryptoService = cryptoService;
      return this;
    }
  }

  /**
   * Class wraps the BCFile reader.
   */
  public static class Reader implements Closeable {
    private final RateLimiter readLimiter;
    // private BCFile.Reader _bc;
    private final String cacheId;
    private CacheProvider cacheProvider;
    private Cache<String,Long> fileLenCache = null;
    // underlying stream; assigned only by the thread that wins the CAS in getBCFile(byte[])
    private volatile InputStream fin = null;
    private boolean closed = false;
    private final Configuration conf;
    private final CryptoService cryptoService;

    private final IoeSupplier<InputStream> inputSupplier;
    private final IoeSupplier<Long> lengthSupplier;
    // lazily-created BCFile reader; null until first use, nulled again on close()
    private final AtomicReference<BCFile.Reader> bcfr = new AtomicReference<>();

    private static final String ROOT_BLOCK_NAME = "!RootData";

    // ACCUMULO-4716 - Define MAX_ARRAY_SIZE smaller than Integer.MAX_VALUE to prevent possible
    // OutOfMemory
    // errors when allocating arrays - described in stackoverflow post:
    // https://stackoverflow.com/a/8381338
    private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * Looks up this file's length in the shared file-length cache, computing it from the length
     * supplier on a miss. The supplier's IOException is tunneled through UncheckedIOException
     * because Caffeine's mapping function cannot throw checked exceptions.
     */
    private long getCachedFileLen() throws IOException {
      try {
        return fileLenCache.get(cacheId, k -> {
          try {
            return lengthSupplier.get();
          } catch (IOException e) {
            throw new UncheckedIOException(e);
          }
        });
      } catch (UncheckedIOException e) {
        throw new IOException("Failed to get " + cacheId + " len from cache ", e);
      }
    }

    /**
     * Returns the BCFile reader, creating it on first use. Thread-safe without locks: each racing
     * thread builds its own candidate reader and only the CAS winner keeps it; losers close their
     * candidate (and its stream) and use the winner's. When serializedMetadata is non-null the
     * reader is rebuilt from cached metadata instead of re-reading the file tail. If a cached file
     * length turns out to be wrong (open fails), the cache entry is invalidated and the open is
     * retried once with a fresh length.
     */
    private BCFile.Reader getBCFile(byte[] serializedMetadata) throws IOException {
      BCFile.Reader reader = bcfr.get();
      if (reader == null) {
        RateLimitedInputStream fsIn =
            new RateLimitedInputStream((InputStream & Seekable) inputSupplier.get(), readLimiter);
        BCFile.Reader tmpReader = null;
        if (serializedMetadata == null) {
          if (fileLenCache == null) {
            tmpReader = new BCFile.Reader(fsIn, lengthSupplier.get(), conf, cryptoService);
          } else {
            long len = getCachedFileLen();
            try {
              tmpReader = new BCFile.Reader(fsIn, len, conf, cryptoService);
            } catch (Exception e) {
              log.debug("Failed to open {}, clearing file length cache and retrying", cacheId, e);
              fileLenCache.invalidate(cacheId);
            }

            if (tmpReader == null) {
              len = getCachedFileLen();
              tmpReader = new BCFile.Reader(fsIn, len, conf, cryptoService);
            }
          }
        } else {
          tmpReader = new BCFile.Reader(serializedMetadata, fsIn, conf, cryptoService);
        }

        if (bcfr.compareAndSet(null, tmpReader)) {
          fin = fsIn;
          return tmpReader;
        } else {
          // another thread won the race; discard our candidate and use theirs
          fsIn.close();
          tmpReader.close();
          return bcfr.get();
        }
      }

      return reader;
    }

    /**
     * Returns the BCFile reader, preferring to reconstruct it from serialized metadata found in
     * the index cache (avoids re-reading the file's root block from disk).
     */
    private BCFile.Reader getBCFile() throws IOException {
      BlockCache _iCache = cacheProvider.getIndexCache();
      if (_iCache != null) {
        CacheEntry mce = _iCache.getBlock(cacheId + ROOT_BLOCK_NAME, new BCFileLoader());
        if (mce != null) {
          return getBCFile(mce.getBuffer());
        }
      }

      return getBCFile(null);
    }

    /**
     * Cache loader for the file's serialized root metadata.
     */
    private class BCFileLoader implements Loader {

      @Override
      public Map<String,Loader> getDependencies() {
        return Collections.emptyMap();
      }

      @Override
      public byte[] load(int maxSize, Map<String,byte[]> dependencies) {
        try {
          return getBCFile(null).serializeMetadata(maxSize);
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      }
    }

    /**
     * Loads a block addressed by explicit offset/compressedSize/rawSize.
     */
    private class RawBlockLoader extends BaseBlockLoader {

      private long offset;
      private long compressedSize;
      private long rawSize;

      private RawBlockLoader(long offset, long compressedSize, long rawSize, boolean loadingMeta) {
        super(loadingMeta);
        this.offset = offset;
        this.compressedSize = compressedSize;
        this.rawSize = rawSize;
      }

      @Override
      BlockReader getBlockReader(int maxSize, BCFile.Reader bcfr) throws IOException {
        // refuse to load blocks too big for the cache or for a Java array
        if (rawSize > Math.min(maxSize, MAX_ARRAY_SIZE)) {
          return null;
        }
        return bcfr.getDataBlock(offset, compressedSize, rawSize);
      }

      @Override
      String getBlockId() {
        return "raw-(" + offset + "," + compressedSize + "," + rawSize + ")";
      }
    }

    /**
     * Loads a data block addressed by its index position within the file.
     */
    private class OffsetBlockLoader extends BaseBlockLoader {

      private int blockIndex;

      private OffsetBlockLoader(int blockIndex, boolean loadingMeta) {
        super(loadingMeta);
        this.blockIndex = blockIndex;
      }

      @Override
      BlockReader getBlockReader(int maxSize, BCFile.Reader bcfr) throws IOException {
        if (bcfr.getDataBlockRawSize(blockIndex) > Math.min(maxSize, MAX_ARRAY_SIZE)) {
          return null;
        }
        return bcfr.getDataBlock(blockIndex);
      }

      @Override
      String getBlockId() {
        return "bi-" + blockIndex;
      }
    }

    /**
     * Loads a named meta block.
     */
    private class MetaBlockLoader extends BaseBlockLoader {
      String blockName;

      MetaBlockLoader(String blockName) {
        super(true);
        this.blockName = blockName;
      }

      @Override
      BlockReader getBlockReader(int maxSize, BCFile.Reader bcfr) throws IOException {
        if (bcfr.getMetaBlockRawSize(blockName) > Math.min(maxSize, MAX_ARRAY_SIZE)) {
          return null;
        }
        return bcfr.getMetaBlock(blockName);
      }

      @Override
      String getBlockId() {
        return "meta-" + blockName;
      }
    }

    /**
     * Common loader machinery: when loading a meta block before the BCFile reader exists, the
     * root-metadata block is declared as a cache dependency so the reader can be rebuilt from it.
     */
    private abstract class BaseBlockLoader implements Loader {

      abstract BlockReader getBlockReader(int maxSize, BCFile.Reader bcfr) throws IOException;

      abstract String getBlockId();

      private boolean loadingMetaBlock;

      public BaseBlockLoader(boolean loadingMetaBlock) {
        this.loadingMetaBlock = loadingMetaBlock;
      }

      @Override
      public Map<String,Loader> getDependencies() {
        if (bcfr.get() == null && loadingMetaBlock) {
          String _lookup = cacheId + ROOT_BLOCK_NAME;
          return Collections.singletonMap(_lookup, new BCFileLoader());
        }
        return Collections.emptyMap();
      }

      @Override
      public byte[] load(int maxSize, Map<String,byte[]> dependencies) {
        try {
          BCFile.Reader reader = bcfr.get();
          if (reader == null) {
            if (loadingMetaBlock) {
              // dependency map supplies the serialized root metadata requested above
              byte[] serializedMetadata = dependencies.get(cacheId + ROOT_BLOCK_NAME);
              reader = getBCFile(serializedMetadata);
            } else {
              reader = getBCFile();
            }
          }

          BlockReader _currBlock = getBlockReader(maxSize, reader);
          if (_currBlock == null) {
            // block too large to cache; caller falls back to an uncached read
            return null;
          }

          byte[] b = null;
          try {
            b = new byte[(int) _currBlock.getRawSize()];
            _currBlock.readFully(b);
          } catch (IOException e) {
            log.debug("Error full blockRead for file " + cacheId + " for block " + getBlockId(), e);
            throw new UncheckedIOException(e);
          } finally {
            _currBlock.close();
          }

          return b;
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      }
    }

    public Reader(CachableBuilder b) {
      this.cacheId = Objects.requireNonNull(b.cacheId);
      this.inputSupplier = b.inputSupplier;
      this.lengthSupplier = b.lengthSupplier;
      this.fileLenCache = b.fileLenCache;
      this.cacheProvider = b.cacheProvider;
      this.readLimiter = b.readLimiter;
      this.conf = b.hadoopConf;
      this.cryptoService = Objects.requireNonNull(b.cryptoService);
    }

    /**
     * It is intended that once the BlockRead object is returned to the caller, that the caller
     * will read the entire block and then call close on the BlockRead class.
     */
    public CachedBlockRead getMetaBlock(String blockName) throws IOException {
      BlockCache _iCache = cacheProvider.getIndexCache();
      if (_iCache != null) {
        String _lookup = this.cacheId + "M" + blockName;
        try {
          CacheEntry ce = _iCache.getBlock(_lookup, new MetaBlockLoader(blockName));
          if (ce != null) {
            return new CachedBlockRead(ce, ce.getBuffer());
          }
        } catch (UncheckedIOException uioe) {
          if (uioe.getCause() instanceof MetaBlockDoesNotExist) {
            // When a block does not exist, it's expected that MetaBlockDoesNotExist is thrown.
            // However we do not want to throw the cause, because stack trace info would be lost.
            // So rewrap and throw in order to preserve the full stack trace.
            throw new MetaBlockDoesNotExist(uioe);
          }
          throw uioe;
        }
      }

      BlockReader _currBlock = getBCFile(null).getMetaBlock(blockName);
      return new CachedBlockRead(_currBlock);
    }

    public CachedBlockRead getMetaBlock(long offset, long compressedSize, long rawSize)
        throws IOException {
      BlockCache _iCache = cacheProvider.getIndexCache();
      if (_iCache != null) {
        String _lookup = this.cacheId + "R" + offset;
        CacheEntry ce =
            _iCache.getBlock(_lookup, new RawBlockLoader(offset, compressedSize, rawSize, true));
        if (ce != null) {
          return new CachedBlockRead(ce, ce.getBuffer());
        }
      }

      BlockReader _currBlock = getBCFile(null).getDataBlock(offset, compressedSize, rawSize);
      return new CachedBlockRead(_currBlock);
    }

    /**
     * It is intended that once the BlockRead object is returned to the caller, that the caller
     * will read the entire block and then call close on the BlockRead class.
     *
     * NOTE: In the case of multi-read threads: This method can do redundant work where an entry
     * is read from disk and other threads check the cache before it has been inserted.
     */
    public CachedBlockRead getDataBlock(int blockIndex) throws IOException {
      BlockCache _dCache = cacheProvider.getDataCache();
      if (_dCache != null) {
        String _lookup = this.cacheId + "O" + blockIndex;
        CacheEntry ce = _dCache.getBlock(_lookup, new OffsetBlockLoader(blockIndex, false));
        if (ce != null) {
          return new CachedBlockRead(ce, ce.getBuffer());
        }
      }

      BlockReader _currBlock = getBCFile().getDataBlock(blockIndex);
      return new CachedBlockRead(_currBlock);
    }

    public CachedBlockRead getDataBlock(long offset, long compressedSize, long rawSize)
        throws IOException {
      BlockCache _dCache = cacheProvider.getDataCache();
      if (_dCache != null) {
        String _lookup = this.cacheId + "R" + offset;
        CacheEntry ce =
            _dCache.getBlock(_lookup, new RawBlockLoader(offset, compressedSize, rawSize, false));
        if (ce != null) {
          return new CachedBlockRead(ce, ce.getBuffer());
        }
      }

      BlockReader _currBlock = getBCFile().getDataBlock(offset, compressedSize, rawSize);
      return new CachedBlockRead(_currBlock);
    }

    @Override
    public synchronized void close() throws IOException {
      if (closed) {
        return;
      }

      closed = true;

      BCFile.Reader reader = bcfr.getAndSet(null);
      if (reader != null) {
        reader.close();
      }

      if (fin != null) {
        // synchronize on the FSDataInputStream to ensure thread safety with the
        // BoundedRangeFileInputStream
        synchronized (fin) {
          fin.close();
        }
      }
    }

    public void setCacheProvider(CacheProvider cacheProvider) {
      this.cacheProvider = cacheProvider;
    }
  }

  /**
   * A DataInputStream over a block's bytes. When backed by a cache entry the stream is also
   * seekable/indexable; when backed directly by a BlockReader it is read-once.
   */
  public static class CachedBlockRead extends DataInputStream {
    private SeekableByteArrayInputStream seekableInput;
    private final CacheEntry cb;
    boolean indexable;

    public CachedBlockRead(InputStream in) {
      super(in);
      cb = null;
      seekableInput = null;
      indexable = false;
    }

    public CachedBlockRead(CacheEntry cb, byte[] buf) {
      this(new SeekableByteArrayInputStream(buf), cb);
    }

    private CachedBlockRead(SeekableByteArrayInputStream seekableInput, CacheEntry cb) {
      super(seekableInput);
      this.seekableInput = seekableInput;
      this.cb = cb;
      indexable = true;
    }

    public void seek(int position) {
      seekableInput.seek(position);
    }

    public int getPosition() {
      return seekableInput.getPosition();
    }

    public boolean isIndexable() {
      return indexable;
    }

    public byte[] getBuffer() {
      return seekableInput.getBuffer();
    }

    public BlockIndex getIndex(Supplier<BlockIndex> indexSupplier) {
      return cb.getIndex(indexSupplier);
    }

    public void indexWeightChanged() {
      cb.indexWeightChanged();
    }
  }
}
9,680
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCacheManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.cache.lru; import org.apache.accumulo.core.spi.cache.BlockCache; import org.apache.accumulo.core.spi.cache.BlockCacheManager; import org.apache.accumulo.core.spi.cache.CacheType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class LruBlockCacheManager extends BlockCacheManager { private static final Logger LOG = LoggerFactory.getLogger(LruBlockCacheManager.class); @Override protected BlockCache createCache(Configuration conf, CacheType type) { LruBlockCacheConfiguration cc = new LruBlockCacheConfiguration(conf, type); LOG.info("Creating {} cache with configuration {}", type, cc); return new LruBlockCache(cc); } @Override public void stop() { for (CacheType type : CacheType.values()) { LruBlockCache cache = ((LruBlockCache) this.getBlockCache(type)); if (cache != null) { cache.shutdown(); } } super.stop(); } }
9,681
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/CachedBlockQueue.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.lru;

import java.util.LinkedList;
import java.util.Objects;
import java.util.PriorityQueue;

/**
 * A memory-bound queue that will grow until an element brings total size &gt;= maxSize. From then
 * on, only entries that are sorted larger than the smallest current entry will be
 * inserted/replaced.
 *
 * <p>
 * Use this when you want to find the largest elements (according to their ordering, not their heap
 * size) that consume as close to the specified maxSize as possible. Default behavior is to grow
 * just above rather than just below specified max.
 *
 * <p>
 * Object used in this queue must implement {@link HeapSize} as well as {@link Comparable}.
 */
public class CachedBlockQueue implements HeapSize {

  // min-heap of blocks; the head is the smallest element by CachedBlock ordering
  private final PriorityQueue<CachedBlock> queue;

  // running total of heapSize() of all elements currently in the queue
  private long heapSize;
  private final long maxSize;

  /**
   * @param maxSize the target size of elements in the queue
   * @param blockSize expected average size of blocks
   */
  public CachedBlockQueue(long maxSize, long blockSize) {
    // size the backing heap for the expected number of blocks; at least 1
    int initialSize = (int) Math.ceil(maxSize / (double) blockSize);
    if (initialSize == 0) {
      initialSize++;
    }
    queue = new PriorityQueue<>(initialSize);
    heapSize = 0;
    this.maxSize = maxSize;
  }

  /**
   * Attempt to add the specified cached block to this queue.
   *
   * <p>
   * If the queue is smaller than the max size, or if the specified element is ordered before the
   * smallest element in the queue, the element will be added to the queue. Otherwise, there is no
   * side effect of this call.
   *
   * @param cb block to try to add to the queue
   */
  public void add(CachedBlock cb) {
    if (heapSize < maxSize) {
      // still below the bound: accept unconditionally
      queue.add(cb);
      heapSize += cb.heapSize();
    } else {
      CachedBlock head =
          Objects.requireNonNull(queue.peek(), "No cached blocks available from queue");
      if (cb.compareTo(head) > 0) {
        // Tentatively account for swapping the head out for cb; if we would still be over
        // maxSize without the head, actually evict it, otherwise keep both (restore head's
        // size) so the total stays just above maxSize as documented.
        heapSize += cb.heapSize();
        heapSize -= head.heapSize();
        if (heapSize > maxSize) {
          queue.poll();
        } else {
          heapSize += head.heapSize();
        }
        queue.add(cb);
      }
    }
  }

  /**
   * Get a sorted List of all elements in this queue, in descending order.
   *
   * @return list of cached elements in descending order
   */
  public CachedBlock[] get() {
    // draining a min-heap yields ascending order; addFirst reverses it to descending.
    // NOTE: this empties the queue.
    LinkedList<CachedBlock> blocks = new LinkedList<>();
    while (!queue.isEmpty()) {
      blocks.addFirst(queue.poll());
    }
    return blocks.toArray(new CachedBlock[0]);
  }

  /**
   * Total size of all elements in this queue.
   *
   * @return size of all elements currently in queue, in bytes
   */
  @Override
  public long heapSize() {
    return heapSize;
  }
}
9,682
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/HeapSize.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.cache.lru; /** * Implementations can be asked for an estimate of their size in bytes. * <p> * Useful for sizing caches. Its a given that implementation approximations do not account for 32 vs * 64 bit nor for different VM implementations. * <p> * An Object's size is determined by the non-static data members in it, as well as the fixed * {@link Object} overhead. * <p> * For example: * * <pre> * public class SampleObject implements HeapSize { * int[] numbers; * int x; * } * </pre> */ public interface HeapSize { /** * @return Approximate 'exclusive deep size' of implementing object. Includes count of payload and * hosting object sizings. */ long heapSize(); }
9,683
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/CachedBlock.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.lru;

import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

import org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize;
import org.apache.accumulo.core.file.blockfile.cache.impl.SizeConstants;
import org.apache.accumulo.core.spi.cache.CacheEntry.Weighable;

/**
 * Represents an entry in the configurable block cache.
 *
 * <p>
 * Makes the block memory-aware with {@link HeapSize} and Comparable to sort by access time for the
 * LRU. It also takes care of priority by either instantiating as in-memory or handling the
 * transition from single to multiple access.
 */
public class CachedBlock implements HeapSize, Comparable<CachedBlock> {

  /**
   * Fixed heap overhead of one cache entry, excluding the variable-length block name and data
   * buffer (object header, references, the two long fields, name String and buffer holder).
   */
  public static final long PER_BLOCK_OVERHEAD =
      ClassSize.align(ClassSize.OBJECT + (3 * ClassSize.REFERENCE)
          + (2 * SizeConstants.SIZEOF_LONG) + ClassSize.STRING + ClassSize.BYTE_BUFFER
          + ClassSize.REFERENCE);

  public enum BlockPriority {
    /**
     * Accessed a single time (used for scan-resistance)
     */
    SINGLE,
    /**
     * Accessed multiple times
     */
    MULTI,
    /**
     * Block from in-memory store
     */
    MEMORY
  }

  private final byte[] buffer;
  private final String blockName;
  private volatile long accessTime;
  // Doubles as an eviction sentinel: a negative value means this block was evicted and its
  // size has already been removed from the cache's total.
  private volatile long recordedSize;
  private BlockPriority priority;
  private Weighable index;

  /**
   * @param blockName cache key for this block
   * @param buf block payload; held by reference, not copied
   * @param accessTime monotonically increasing access sequence number from the cache
   * @param inMemory true to pin into the MEMORY priority bucket, otherwise starts as SINGLE
   */
  public CachedBlock(String blockName, byte[] buf, long accessTime, boolean inMemory) {
    this.buffer = buf;
    this.blockName = blockName;
    this.accessTime = accessTime;
    if (inMemory) {
      this.priority = BlockPriority.MEMORY;
    } else {
      this.priority = BlockPriority.SINGLE;
    }
  }

  /**
   * Block has been accessed. Update its local access time and promote SINGLE blocks to MULTI
   * (scan-resistance: a second access proves the block is not a one-shot scan read).
   */
  public void access(long accessTime) {
    this.accessTime = accessTime;
    if (this.priority == BlockPriority.SINGLE) {
      this.priority = BlockPriority.MULTI;
    }
  }

  /**
   * @return last recorded size of this entry
   * @throws IllegalStateException if this block has been evicted
   */
  @Override
  public long heapSize() {
    if (recordedSize < 0) {
      throw new IllegalStateException("Block was evicted: " + blockName);
    }
    return recordedSize;
  }

  @Override
  public int hashCode() {
    // Long.hashCode avoids the autoboxing performed by Objects.hashCode(Object) while producing
    // the same value as Long.valueOf(accessTime).hashCode(). NOTE: hashCode/equals derive from
    // the mutable accessTime, so entries are not suitable as hash-map keys.
    return Long.hashCode(accessTime);
  }

  @Override
  public boolean equals(Object obj) {
    return this == obj || (obj instanceof CachedBlock && compareTo((CachedBlock) obj) == 0);
  }

  /**
   * Orders entries by descending access time: an older (smaller) accessTime compares greater, so
   * least-recently-used blocks sort last and are the first eviction candidates.
   */
  @Override
  public int compareTo(CachedBlock that) {
    if (this.accessTime == that.accessTime) {
      return 0;
    }
    return this.accessTime < that.accessTime ? 1 : -1;
  }

  public String getName() {
    return this.blockName;
  }

  public BlockPriority getPriority() {
    return this.priority;
  }

  public byte[] getBuffer() {
    return buffer;
  }

  /**
   * Lazily create and return the block's associated index object. Returns null (and does not
   * create) if this block was already evicted.
   */
  @SuppressWarnings("unchecked")
  public synchronized <T extends Weighable> T getIndex(Supplier<T> supplier) {
    if (index == null && recordedSize >= 0) {
      index = supplier.get();
    }
    return (T) index;
  }

  // Recompute this entry's size (name + buffer + fixed overhead + index weight) and apply the
  // delta to the cache-wide total. Callers must hold this block's monitor.
  private synchronized long _recordSize(AtomicLong totalSize) {
    long indexSize = (index == null) ? 0 : index.weight();
    long newSize = ClassSize.align(blockName.length()) + ClassSize.align(buffer.length)
        + PER_BLOCK_OVERHEAD + indexSize;
    long delta = newSize - recordedSize;
    recordedSize = newSize;
    return totalSize.addAndGet(delta);
  }

  /**
   * Attempt to record size if not evicted.
   *
   * @return -1 if evicted
   */
  synchronized long tryRecordSize(AtomicLong totalSize) {
    if (recordedSize >= 0) {
      return _recordSize(totalSize);
    }
    return -1;
  }

  /**
   * Record this entry's size into the cache total.
   *
   * @throws IllegalStateException if this block has been evicted
   */
  public synchronized long recordSize(AtomicLong totalSize) {
    if (recordedSize >= 0) {
      return _recordSize(totalSize);
    }
    throw new IllegalStateException("Block was evicted: " + blockName);
  }

  /**
   * Mark this block evicted: subtract its size from the cache total, drop the index, and set the
   * negative sentinel so later size operations fail fast.
   *
   * @return the number of bytes this entry was occupying
   * @throws IllegalStateException if already evicted
   */
  public synchronized long evicted(AtomicLong totalSize) {
    if (recordedSize >= 0) {
      totalSize.addAndGet(recordedSize * -1);
      long tmp = recordedSize;
      recordedSize = -1;
      index = null;
      return tmp;
    }
    throw new IllegalStateException("Block was already evicted: " + blockName);
  }
}
9,684
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/SynchronousLoadingBlockCache.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.lru;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.accumulo.core.spi.cache.BlockCache;
import org.apache.accumulo.core.spi.cache.CacheEntry;

/**
 * This class implements loading in such a way that load operations for the same block will not run
 * concurrently: a fixed array of striped locks serializes loads per block name while still
 * permitting many unrelated blocks to load in parallel.
 */
public abstract class SynchronousLoadingBlockCache implements BlockCache {

  // Striped fair locks; a block name hashes to one stripe, so at most one load per stripe runs.
  private final Lock[] loadLocks;

  /**
   * @param numLocks this controls how many load operations can run concurrently
   */
  SynchronousLoadingBlockCache(int numLocks) {
    Lock[] stripes = new Lock[numLocks];
    for (int slot = 0; slot < stripes.length; slot++) {
      stripes[slot] = new ReentrantLock(true);
    }
    loadLocks = stripes;
  }

  public SynchronousLoadingBlockCache() {
    this(5003);
  }

  /**
   * Recursively loads every dependency of a loader via {@link #getBlock(String, Loader)}.
   *
   * @return map from dependency name to its buffer, or null if any dependency failed to load
   */
  private Map<String,byte[]> resolveDependencies(Map<String,Loader> loaderDeps) {
    int depCount = loaderDeps.size();
    if (depCount == 0) {
      return Collections.emptyMap();
    }
    if (depCount == 1) {
      // Common case: avoid allocating a HashMap for a single dependency.
      Entry<String,Loader> only = loaderDeps.entrySet().iterator().next();
      CacheEntry cached = getBlock(only.getKey(), only.getValue());
      if (cached == null) {
        return null;
      }
      return Collections.singletonMap(only.getKey(), cached.getBuffer());
    }
    Map<String,byte[]> resolved = new HashMap<>();
    for (Entry<String,Loader> dep : loaderDeps.entrySet()) {
      CacheEntry cached = getBlock(dep.getKey(), dep.getValue());
      if (cached == null) {
        return null;
      }
      resolved.put(dep.getKey(), cached.getBuffer());
    }
    return resolved;
  }

  /**
   * Get the maximum size of an individual cache entry.
   */
  protected abstract int getMaxEntrySize();

  /**
   * Get a block from the cache without changing any stats the cache is keeping.
   */
  protected abstract CacheEntry getBlockNoStats(String blockName);

  @Override
  public CacheEntry getBlock(String blockName, Loader loader) {
    CacheEntry hit = getBlock(blockName);
    if (hit != null) {
      return hit;
    }

    // intentionally done before getting lock: dependency loads take their own stripe locks,
    // so resolving them while holding ours could deadlock or serialize unrelated loads
    Map<String,byte[]> depData = resolveDependencies(loader.getDependencies());
    if (depData == null) {
      return null;
    }

    // mask the sign bit so the modulus is a valid index even for negative hash codes
    Lock stripe = loadLocks[(blockName.hashCode() & 0x7fffffff) % loadLocks.length];
    stripe.lock();
    try {
      // check again after getting lock, could have loaded while waiting on lock
      CacheEntry raced = getBlockNoStats(blockName);
      if (raced != null) {
        return raced;
      }

      // not in cache so load data
      byte[] data = loader.load(getMaxEntrySize(), depData);
      if (data == null) {
        return null;
      }

      // attempt to add data to cache
      return cacheBlock(blockName, data);
    } finally {
      stripe.unlock();
    }
  }
}
9,685
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCache.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.lru;

import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize.CONCURRENT_HASHMAP;
import static org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize.CONCURRENT_HASHMAP_ENTRY;
import static org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize.CONCURRENT_HASHMAP_SEGMENT;

import java.lang.ref.WeakReference;
import java.util.Objects;
import java.util.PriorityQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

import org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize;
import org.apache.accumulo.core.file.blockfile.cache.impl.SizeConstants;
import org.apache.accumulo.core.spi.cache.BlockCache;
import org.apache.accumulo.core.spi.cache.CacheEntry;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.accumulo.core.util.threads.Threads.AccumuloDaemonThread;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;

/**
 * A block cache implementation that is memory-aware using {@link HeapSize}, memory-bound using an
 * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a
 * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock}
 * operations.
 *
 * <p>
 * Contains three levels of block priority to allow for scan-resistance and in-memory families. A
 * block is added with an inMemory flag if necessary, otherwise a block becomes a single access
 * priority. Once a blocked is accessed again, it changes to multiple access. This is used to
 * prevent scans from thrashing the cache, adding a least-frequently-used element to the eviction
 * algorithm.
 *
 * <p>
 * Each priority is given its own chunk of the total cache to ensure fairness during eviction. Each
 * priority will retain close to its maximum size, however, if any priority is not using its entire
 * chunk the others are able to grow beyond their chunk size.
 *
 * <p>
 * Instantiated at a minimum with the total size and average block size. All sizes are in bytes. The
 * block size is not especially important as this cache is fully dynamic in its sizing of blocks. It
 * is only used for pre-allocating data structures and in initial heap estimation of the map.
 *
 * <p>
 * The detailed constructor defines the sizes for the three priorities (they should total to the
 * maximum size defined). It also sets the levels that trigger and control the eviction thread.
 *
 * <p>
 * The acceptable size is the cache size level which triggers the eviction process to start. It
 * evicts enough blocks to get the size below the minimum size specified.
 *
 * <p>
 * Eviction happens in a separate thread and involves a single full-scan of the map. It determines
 * how many bytes must be freed to reach the minimum size, and then while scanning determines the
 * fewest least-recently-used blocks necessary from each of the three priorities (would be 3 times
 * bytes to free). It then uses the priority chunk sizes to evict fairly according to the relative
 * sizes and usage.
 */
public class LruBlockCache extends SynchronousLoadingBlockCache implements BlockCache, HeapSize {

  private static final Logger log = LoggerFactory.getLogger(LruBlockCache.class);

  /** Statistics thread period, in seconds */
  static final int statThreadPeriod = 60;

  /** Concurrent map (the cache) */
  private final ConcurrentHashMap<String,CachedBlock> map;

  /** Eviction lock (locked when eviction in process) */
  private final ReentrantLock evictionLock = new ReentrantLock(true);

  /** Volatile boolean to track if we are in an eviction process or not */
  private volatile boolean evictionInProgress = false;

  /** Eviction thread */
  private final EvictionThread evictionThread;

  /** Statistics thread schedule pool (for heavy debugging, could remove) */
  private final ScheduledExecutorService scheduleThreadPool = ThreadPools.getServerThreadPools()
      .createScheduledExecutorService(1, "LRUBlockCacheStats", true);

  /** Current size of cache, in bytes; seeded with the fixed structural overhead */
  private final AtomicLong size;

  /** Current number of cached elements */
  private final AtomicLong elements;

  /** Cache access count (sequential ID); doubles as the logical access clock for LRU ordering */
  private final AtomicLong count;

  /** Cache statistics */
  private final CacheStats stats;

  /** Overhead of the structure itself */
  private final long overhead;

  private final LruBlockCacheConfiguration conf;

  /**
   * Default constructor. Specify maximum size and expected average block size (approximation is
   * fine).
   *
   * <p>
   * All other factors will be calculated based on defaults specified in this class.
   *
   * @param conf block cache configuration
   */
  @SuppressFBWarnings(value = "SC_START_IN_CTOR",
      justification = "bad practice to start threads in constructor; probably needs rewrite")
  public LruBlockCache(final LruBlockCacheConfiguration conf) {
    this.conf = conf;

    // presize the map at 120% of max entries so it rarely resizes
    int mapInitialSize = (int) Math.ceil(1.2 * conf.getMaxSize() / conf.getBlockSize());

    map = new ConcurrentHashMap<>(mapInitialSize, conf.getMapLoadFactor(),
        conf.getMapConcurrencyLevel());
    this.stats = new CacheStats();
    this.count = new AtomicLong(0);
    this.elements = new AtomicLong(0);
    this.overhead = calculateOverhead(conf.getMaxSize(), conf.getBlockSize(),
        conf.getMapConcurrencyLevel());
    this.size = new AtomicLong(this.overhead);

    if (conf.isUseEvictionThread()) {
      this.evictionThread = new EvictionThread(this);
      this.evictionThread.start();
      // busy-wait until the eviction thread has entered its run loop, so a later
      // evict() notify cannot be missed
      while (!this.evictionThread.running()) {
        try {
          Thread.sleep(10);
        } catch (InterruptedException ex) {
          throw new IllegalStateException(ex);
        }
      }
    } else {
      this.evictionThread = null;
    }

    ScheduledFuture<?> future = this.scheduleThreadPool.scheduleAtFixedRate(
        new StatisticsThread(this), statThreadPeriod, statThreadPeriod, SECONDS);
    ThreadPools.watchNonCriticalScheduledTask(future);
  }

  public long getOverhead() {
    return overhead;
  }

  /*
   * This class exists so that every cache entry does not have a reference to the cache.
   */
  private class LruCacheEntry implements CacheEntry {
    private final CachedBlock block;

    LruCacheEntry(CachedBlock block) {
      this.block = block;
    }

    @Override
    public byte[] getBuffer() {
      return block.getBuffer();
    }

    @Override
    public <T extends Weighable> T getIndex(Supplier<T> supplier) {
      return block.getIndex(supplier);
    }

    @Override
    public void indexWeightChanged() {
      // re-record the entry's size; a negative result means it was already evicted
      long newSize = block.tryRecordSize(size);
      if (newSize >= 0 && newSize > acceptableSize() && !evictionInProgress) {
        runEviction();
      }
    }
  }

  // Wrap a CachedBlock (or null) in the CacheEntry view handed to callers.
  private CacheEntry wrap(CachedBlock cb) {
    if (cb == null) {
      return null;
    }
    return new LruCacheEntry(cb);
  }

  // BlockCache implementation

  /**
   * Cache the block with the specified name and buffer.
   * <p>
   * It is assumed this will NEVER be called on an already cached block. If that is done, it is
   * assumed that you are reinserting the same exact block due to a race condition and will update
   * the buffer but not modify the size of the cache.
   *
   * @param blockName block name
   * @param buf block buffer
   * @param inMemory if block is in-memory
   */
  public CacheEntry cacheBlock(String blockName, byte[] buf, boolean inMemory) {
    CachedBlock cb = map.get(blockName);
    if (cb != null) {
      stats.duplicateReads();
      cb.access(count.incrementAndGet());
    } else {
      cb = new CachedBlock(blockName, buf, count.incrementAndGet(), inMemory);
      CachedBlock currCb = map.putIfAbsent(blockName, cb);
      if (currCb != null) {
        // lost the insertion race; keep the existing entry and just touch it
        stats.duplicateReads();
        cb = currCb;
        cb.access(count.incrementAndGet());
      } else {
        // Actually added block to cache
        long newSize = cb.recordSize(size);
        elements.incrementAndGet();
        if (newSize > acceptableSize() && !evictionInProgress) {
          runEviction();
        }
      }
    }

    return wrap(cb);
  }

  /**
   * Cache the block with the specified name and buffer.
   * <p>
   * It is assumed this will NEVER be called on an already cached block. If that is done, it is
   * assumed that you are reinserting the same exact block due to a race condition and will update
   * the buffer but not modify the size of the cache.
   *
   * @param blockName block name
   * @param buf block buffer
   */
  @Override
  public CacheEntry cacheBlock(String blockName, byte[] buf) {
    return cacheBlock(blockName, buf, false);
  }

  /**
   * Get the buffer of the block with the specified name.
   *
   * @param blockName block name
   * @return buffer of specified block name, or null if not in cache
   */
  @Override
  public CacheEntry getBlock(String blockName) {
    CachedBlock cb = map.get(blockName);
    if (cb == null) {
      stats.miss();
      return null;
    }
    stats.hit();
    cb.access(count.incrementAndGet());
    return wrap(cb);
  }

  @Override
  protected CacheEntry getBlockNoStats(String blockName) {
    CachedBlock cb = map.get(blockName);
    if (cb != null) {
      cb.access(count.incrementAndGet());
    }
    return wrap(cb);
  }

  // Remove a block from the map and account for the freed bytes; returns 0 if another
  // thread removed it first.
  protected long evictBlock(CachedBlock block) {
    if (map.remove(block.getName()) != null) {
      elements.decrementAndGet();
      stats.evicted();
      return block.evicted(size);
    }
    return 0;
  }

  /**
   * Multi-threaded call to run the eviction process.
   */
  private void runEviction() {
    if (evictionThread == null) {
      evict();
    } else {
      evictionThread.evict();
    }
  }

  /**
   * Eviction method.
   */
  void evict() {

    // Ensure only one eviction at a time
    if (!evictionLock.tryLock()) {
      return;
    }

    try {
      evictionInProgress = true;

      long bytesToFree = size.get() - minSize();

      log.trace("Block cache LRU eviction started. Attempting to free {} bytes", bytesToFree);

      if (bytesToFree <= 0) {
        return;
      }

      // Instantiate priority buckets
      BlockBucket bucketSingle = new BlockBucket(bytesToFree, conf.getBlockSize(), singleSize());
      BlockBucket bucketMulti = new BlockBucket(bytesToFree, conf.getBlockSize(), multiSize());
      BlockBucket bucketMemory = new BlockBucket(bytesToFree, conf.getBlockSize(), memorySize());

      // Scan entire map putting into appropriate buckets
      for (CachedBlock cachedBlock : map.values()) {
        switch (cachedBlock.getPriority()) {
          case SINGLE:
            bucketSingle.add(cachedBlock);
            break;
          case MULTI:
            bucketMulti.add(cachedBlock);
            break;
          case MEMORY:
            bucketMemory.add(cachedBlock);
            break;
        }
      }

      // Poll buckets in order of least overflow first, so the bytes to free are split
      // fairly across the buckets that are actually over their chunk.
      PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);

      bucketQueue.add(bucketSingle);
      bucketQueue.add(bucketMulti);
      bucketQueue.add(bucketMemory);

      int remainingBuckets = 3;
      long bytesFreed = 0;

      BlockBucket bucket;
      while ((bucket = bucketQueue.poll()) != null) {
        long overflow = bucket.overflow();
        if (overflow > 0) {
          long bucketBytesToFree = Math.min(overflow,
              (long) Math.ceil((bytesToFree - bytesFreed) / (double) remainingBuckets));
          bytesFreed += bucket.free(bucketBytesToFree);
        }
        remainingBuckets--;
      }

      float singleMB = ((float) bucketSingle.totalSize()) / ((float) (1024 * 1024));
      float multiMB = ((float) bucketMulti.totalSize()) / ((float) (1024 * 1024));
      float memoryMB = ((float) bucketMemory.totalSize()) / ((float) (1024 * 1024));

      log.trace(
          "Block cache LRU eviction completed. Freed {} bytes. Priority Sizes:"
              + " Single={}MB ({}), Multi={}MB ({}), Memory={}MB ({})",
          bytesFreed, singleMB, bucketSingle.totalSize(), multiMB, bucketMulti.totalSize(),
          memoryMB, bucketMemory.totalSize());

    } finally {
      stats.evict();
      evictionInProgress = false;
      evictionLock.unlock();
    }
  }

  /**
   * Used to group blocks into priority buckets. There will be a BlockBucket for each priority
   * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of
   * elements out of each according to configuration parameters and their relatives sizes.
   */
  private class BlockBucket implements Comparable<BlockBucket> {

    private CachedBlockQueue queue;
    private long totalSize = 0;
    private long bucketSize;

    public BlockBucket(long bytesToFree, long blockSize, long bucketSize) {
      this.bucketSize = bucketSize;
      queue = new CachedBlockQueue(bytesToFree, blockSize);
      totalSize = 0;
    }

    public void add(CachedBlock block) {
      totalSize += block.heapSize();
      queue.add(block);
    }

    // Evict queued blocks until at least toFree bytes are reclaimed (or the queue is empty).
    public long free(long toFree) {
      CachedBlock[] blocks = queue.get();
      long freedBytes = 0;
      for (CachedBlock block : blocks) {
        freedBytes += evictBlock(block);
        if (freedBytes >= toFree) {
          return freedBytes;
        }
      }
      return freedBytes;
    }

    // Bytes this bucket holds beyond its configured chunk; may be negative.
    public long overflow() {
      return totalSize - bucketSize;
    }

    public long totalSize() {
      return totalSize;
    }

    @Override
    public int compareTo(BlockBucket that) {
      if (this.overflow() == that.overflow()) {
        return 0;
      }
      return this.overflow() > that.overflow() ? 1 : -1;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(overflow());
    }

    @Override
    public boolean equals(Object that) {
      if (that instanceof BlockBucket) {
        return compareTo((BlockBucket) that) == 0;
      }
      return false;
    }
  }

  @Override
  public long getMaxHeapSize() {
    return getMaxSize();
  }

  @Override
  public long getMaxSize() {
    return this.conf.getMaxSize();
  }

  @Override
  public int getMaxEntrySize() {
    return (int) Math.min(Integer.MAX_VALUE, getMaxSize());
  }

  /**
   * Get the current size of this cache.
   *
   * @return current size in bytes
   */
  public long getCurrentSize() {
    return this.size.get();
  }

  /**
   * Get the remaining free capacity of this cache.
   *
   * @return free size in bytes
   */
  public long getFreeSize() {
    return getMaxSize() - getCurrentSize();
  }

  /**
   * Get the size of this cache (number of cached blocks)
   *
   * @return number of cached blocks
   */
  public long size() {
    return this.elements.get();
  }

  /**
   * Get the number of eviction runs that have occurred
   */
  public long getEvictionCount() {
    return this.stats.getEvictionCount();
  }

  /**
   * Get the number of blocks that have been evicted during the lifetime of this cache.
   */
  public long getEvictedCount() {
    return this.stats.getEvictedCount();
  }

  /**
   * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows
   * above the acceptable level.
   *
   * <p>
   * Thread is triggered into action by {@link LruBlockCache#runEviction()}
   */
  private static class EvictionThread extends AccumuloDaemonThread {

    // weak reference so this daemon thread does not keep the cache alive
    private WeakReference<LruBlockCache> cache;
    private boolean running = false;

    public EvictionThread(LruBlockCache cache) {
      super("LruBlockCache.EvictionThread");
      this.cache = new WeakReference<>(cache);
    }

    public synchronized boolean running() {
      return running;
    }

    @SuppressFBWarnings(value = "UW_UNCOND_WAIT", justification = "eviction is resumed by caller")
    @Override
    public void run() {
      while (true) {
        synchronized (this) {
          running = true;
          try {
            this.wait();
          } catch (InterruptedException e) {}
        }
        LruBlockCache cache = this.cache.get();
        if (cache == null) {
          // cache was garbage collected; exit the thread
          break;
        }
        cache.evict();
      }
    }

    @SuppressFBWarnings(value = "NN_NAKED_NOTIFY", justification = "eviction is resumed by caller")
    public void evict() {
      synchronized (this) {
        this.notify();
      }
    }
  }

  /*
   * Statistics thread. Periodically prints the cache statistics to the log.
   */
  private static class StatisticsThread extends AccumuloDaemonThread {

    LruBlockCache lru;

    public StatisticsThread(LruBlockCache lru) {
      super("LruBlockCache.StatisticsThread");
      this.lru = lru;
    }

    @Override
    public void run() {
      lru.logStats();
    }
  }

  public void logStats() {
    // Log size
    long totalSize = heapSize();
    long freeSize = this.conf.getMaxSize() - totalSize;
    float sizeMB = ((float) totalSize) / ((float) (1024 * 1024));
    float freeMB = ((float) freeSize) / ((float) (1024 * 1024));
    float maxMB = ((float) this.conf.getMaxSize()) / ((float) (1024 * 1024));
    log.debug(
        "Cache Stats: Sizes: Total={}MB ({}), Free={}MB ({}), Max={}MB"
            + " ({}), Counts: Blocks={}, Access={}, Hit={}, Miss={}, Evictions={},"
            + " Evicted={},Ratios: Hit Ratio={}%, Miss Ratio={}%, Evicted/Run={},"
            + " Duplicate Reads={}",
        sizeMB, totalSize, freeMB, freeSize, maxMB, this.conf.getMaxSize(), size(),
        stats.requestCount(), stats.hitCount(), stats.getMissCount(), stats.getEvictionCount(),
        stats.getEvictedCount(), stats.getHitRatio() * 100, stats.getMissRatio() * 100,
        stats.evictedPerEviction(), stats.getDuplicateReads());
  }

  /**
   * Get counter statistics for this cache.
   *
   * <p>
   * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes.
   */
  @Override
  public CacheStats getStats() {
    return this.stats;
  }

  public static class CacheStats implements BlockCache.Stats {

    private final AtomicLong accessCount = new AtomicLong(0);
    private final AtomicLong hitCount = new AtomicLong(0);
    private final AtomicLong missCount = new AtomicLong(0);
    private final AtomicLong evictionCount = new AtomicLong(0);
    private final AtomicLong evictedCount = new AtomicLong(0);
    private final AtomicLong duplicateReads = new AtomicLong(0);

    public void miss() {
      missCount.incrementAndGet();
      accessCount.incrementAndGet();
    }

    public void hit() {
      hitCount.incrementAndGet();
      accessCount.incrementAndGet();
    }

    public void evict() {
      evictionCount.incrementAndGet();
    }

    public void duplicateReads() {
      duplicateReads.incrementAndGet();
    }

    public void evicted() {
      evictedCount.incrementAndGet();
    }

    @Override
    public long requestCount() {
      return accessCount.get();
    }

    public long getMissCount() {
      return missCount.get();
    }

    @Override
    public long hitCount() {
      return hitCount.get();
    }

    public long getEvictionCount() {
      return evictionCount.get();
    }

    public long getDuplicateReads() {
      return duplicateReads.get();
    }

    public long getEvictedCount() {
      return evictedCount.get();
    }

    public double getHitRatio() {
      return ((float) hitCount() / (float) requestCount());
    }

    public double getMissRatio() {
      return ((float) getMissCount() / (float) requestCount());
    }

    public double evictedPerEviction() {
      return (float) getEvictedCount() / (float) getEvictionCount();
    }
  }

  public static final long CACHE_FIXED_OVERHEAD =
      ClassSize.align((3 * SizeConstants.SIZEOF_LONG) + (8 * ClassSize.REFERENCE)
          + (5 * SizeConstants.SIZEOF_FLOAT) + SizeConstants.SIZEOF_BOOLEAN + ClassSize.OBJECT);

  // HeapSize implementation
  @Override
  public long heapSize() {
    return getCurrentSize();
  }

  // Estimate the fixed overhead of the cache structure itself: fixed fields, the concurrent
  // map, its expected entries (sized at 120% of capacity), and its segments.
  public static long calculateOverhead(long maxSize, long blockSize, int concurrency) {
    long entryPart = Math.round(maxSize * 1.2 / blockSize) * CONCURRENT_HASHMAP_ENTRY;
    long segmentPart = (long) concurrency * CONCURRENT_HASHMAP_SEGMENT;
    return CACHE_FIXED_OVERHEAD + CONCURRENT_HASHMAP + entryPart + segmentPart;
  }

  // Simple calculators of sizes given factors and maxSize

  private long acceptableSize() {
    return (long) Math.floor(this.conf.getMaxSize() * this.conf.getAcceptableFactor());
  }

  private long minSize() {
    return (long) Math.floor(this.conf.getMaxSize() * this.conf.getMinFactor());
  }

  private long singleSize() {
    return (long) Math
        .floor(this.conf.getMaxSize() * this.conf.getSingleFactor() * this.conf.getMinFactor());
  }

  private long multiSize() {
    return (long) Math
        .floor(this.conf.getMaxSize() * this.conf.getMultiFactor() * this.conf.getMinFactor());
  }

  private long memorySize() {
    return (long) Math
        .floor(this.conf.getMaxSize() * this.conf.getMemoryFactor() * this.conf.getMinFactor());
  }

  public void shutdown() {
    this.scheduleThreadPool.shutdown();
  }
}
9,686
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/lru/LruBlockCacheConfiguration.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.lru;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheConfiguration;
import org.apache.accumulo.core.spi.cache.BlockCacheManager.Configuration;
import org.apache.accumulo.core.spi.cache.CacheType;

import com.google.common.base.Preconditions;

/**
 * Immutable configuration for {@link LruBlockCache}. Values are read from properties under the
 * "lru" prefix for a given {@link CacheType}; anything unset falls back to the defaults declared
 * here. Factor relationships are validated in the constructor.
 */
public final class LruBlockCacheConfiguration {

  public static final String PROPERTY_PREFIX = "lru";

  /** Default Configuration Parameters */

  /** Backing Concurrent Map Configuration */
  public static final Float DEFAULT_LOAD_FACTOR = 0.75f;
  public static final Integer DEFAULT_CONCURRENCY_LEVEL = 16;

  /** Eviction thresholds */
  public static final Float DEFAULT_MIN_FACTOR = 0.75f;
  public static final Float DEFAULT_ACCEPTABLE_FACTOR = 0.85f;

  /** Priority buckets */
  public static final Float DEFAULT_SINGLE_FACTOR = 0.25f;
  public static final Float DEFAULT_MULTI_FACTOR = 0.50f;
  public static final Float DEFAULT_MEMORY_FACTOR = 0.25f;

  // property names (appended to the fully qualified "lru" prefix)
  public static final String ACCEPTABLE_FACTOR_PROPERTY = "acceptable.factor";
  public static final String MIN_FACTOR_PROPERTY = "min.factor";
  public static final String SINGLE_FACTOR_PROPERTY = "single.factor";
  public static final String MULTI_FACTOR_PROPERTY = "multi.factor";
  public static final String MEMORY_FACTOR_PROPERTY = "memory.factor";
  public static final String MAP_LOAD_PROPERTY = "map.load";
  public static final String MAP_CONCURRENCY_PROPERTY = "map.concurrency";
  public static final String EVICTION_THREAD_PROPERTY = "eviction.thread";

  /** Acceptable size of cache (no evictions if size < acceptable) */
  private final float acceptableFactor;

  /** Minimum threshold of cache (when evicting, evict until size < min) */
  private final float minFactor;

  /** Single access bucket size */
  private final float singleFactor;

  /** Multiple access bucket size */
  private final float multiFactor;

  /** In-memory bucket size */
  private final float memoryFactor;

  /** Load factor for the backing ConcurrentHashMap */
  private final float mapLoadFactor;

  /** Concurrency level for the backing ConcurrentHashMap */
  private final int mapConcurrencyLevel;

  private final boolean useEvictionThread;

  private final Configuration conf;
  private final Map<String,String> props;
  private final CacheType type;

  // Look up a raw property value; empty Optional when the key is unset.
  private Optional<String> get(String k) {
    return Optional.ofNullable(props.get(k));
  }

  /**
   * @param conf source of cache properties and sizes
   * @param type which cache (index/data/summary) these properties apply to
   * @throws IllegalArgumentException if the configured factors are inconsistent
   */
  public LruBlockCacheConfiguration(Configuration conf, CacheType type) {

    this.type = type;
    this.conf = conf;

    this.props = conf.getProperties(PROPERTY_PREFIX, type);

    // each factor falls back to its default when unset or non-positive
    this.acceptableFactor = get(ACCEPTABLE_FACTOR_PROPERTY).map(Float::valueOf).filter(f -> f > 0)
        .orElse(DEFAULT_ACCEPTABLE_FACTOR);

    this.minFactor =
        get(MIN_FACTOR_PROPERTY).map(Float::valueOf).filter(f -> f > 0).orElse(DEFAULT_MIN_FACTOR);

    this.singleFactor = get(SINGLE_FACTOR_PROPERTY).map(Float::valueOf).filter(f -> f > 0)
        .orElse(DEFAULT_SINGLE_FACTOR);

    this.multiFactor = get(MULTI_FACTOR_PROPERTY).map(Float::valueOf).filter(f -> f > 0)
        .orElse(DEFAULT_MULTI_FACTOR);

    this.memoryFactor = get(MEMORY_FACTOR_PROPERTY).map(Float::valueOf).filter(f -> f > 0)
        .orElse(DEFAULT_MEMORY_FACTOR);

    this.mapLoadFactor =
        get(MAP_LOAD_PROPERTY).map(Float::valueOf).filter(f -> f > 0).orElse(DEFAULT_LOAD_FACTOR);

    this.mapConcurrencyLevel = get(MAP_CONCURRENCY_PROPERTY).map(Integer::valueOf)
        .filter(i -> i > 0).orElse(DEFAULT_CONCURRENCY_LEVEL);

    this.useEvictionThread = get(EVICTION_THREAD_PROPERTY).map(Boolean::valueOf).orElse(true);

    // NOTE(review): exact float equality; factor combinations whose float sum is not exactly
    // 1.0f are rejected even when they sum to 1 mathematically — confirm this strictness is
    // intended before relaxing to an epsilon comparison.
    if (this.getSingleFactor() + this.getMultiFactor() + this.getMemoryFactor() != 1) {
      throw new IllegalArgumentException("Single, multi, and memory factors should total 1.0");
    }

    if (this.getMinFactor() >= this.getAcceptableFactor()) {
      throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
    }

    if (this.getMinFactor() >= 1.0f || this.getAcceptableFactor() >= 1.0f) {
      throw new IllegalArgumentException("all factors must be < 1");
    }
  }

  public float getAcceptableFactor() {
    return acceptableFactor;
  }

  public float getMinFactor() {
    return minFactor;
  }

  public float getSingleFactor() {
    return singleFactor;
  }

  public float getMultiFactor() {
    return multiFactor;
  }

  public float getMemoryFactor() {
    return memoryFactor;
  }

  public float getMapLoadFactor() {
    return mapLoadFactor;
  }

  public int getMapConcurrencyLevel() {
    return mapConcurrencyLevel;
  }

  public boolean isUseEvictionThread() {
    return useEvictionThread;
  }

  /**
   * Builds the property map (prefixed keys to string values) used to configure an LRU cache.
   * Each setter validates its argument; {@link #buildMap()} returns an immutable snapshot.
   */
  public static class Builder {

    private Map<String,String> props = new HashMap<>();
    private String prefix;

    private Builder(String prefix) {
      this.prefix = prefix;
    }

    private void set(String prop, float val) {
      props.put(prefix + prop, Float.toString(val));
    }

    public Builder acceptableFactor(float af) {
      Preconditions.checkArgument(af > 0);
      set(ACCEPTABLE_FACTOR_PROPERTY, af);
      return this;
    }

    public Builder minFactor(float mf) {
      Preconditions.checkArgument(mf > 0);
      set(MIN_FACTOR_PROPERTY, mf);
      return this;
    }

    public Builder singleFactor(float sf) {
      Preconditions.checkArgument(sf > 0);
      set(SINGLE_FACTOR_PROPERTY, sf);
      return this;
    }

    public Builder multiFactor(float mf) {
      Preconditions.checkArgument(mf > 0);
      set(MULTI_FACTOR_PROPERTY, mf);
      return this;
    }

    public Builder memoryFactor(float mf) {
      Preconditions.checkArgument(mf > 0);
      set(MEMORY_FACTOR_PROPERTY, mf);
      return this;
    }

    public Builder mapLoadFactor(float mlf) {
      Preconditions.checkArgument(mlf > 0);
      set(MAP_LOAD_PROPERTY, mlf);
      return this;
    }

    public Builder mapConcurrencyLevel(int mcl) {
      Preconditions.checkArgument(mcl > 0);
      props.put(prefix + MAP_CONCURRENCY_PROPERTY, mcl + "");
      return this;
    }

    public Builder useEvictionThread(boolean uet) {
      props.put(prefix + EVICTION_THREAD_PROPERTY, uet + "");
      return this;
    }

    public Map<String,String> buildMap() {
      return Map.copyOf(props);
    }
  }

  public static Builder builder(Property serverPrefix, CacheType ct) {
    return new Builder(
        BlockCacheConfiguration.getFullyQualifiedPropertyPrefix(serverPrefix, PROPERTY_PREFIX, ct));
  }

  @Override
  public String toString() {
    return super.toString() + ", acceptableFactor: " + this.getAcceptableFactor() + ", minFactor: "
        + this.getMinFactor() + ", singleFactor: " + this.getSingleFactor() + ", multiFactor: "
        + this.getMultiFactor() + ", memoryFactor: " + this.getMemoryFactor() + ", mapLoadFactor: "
        + this.getMapLoadFactor() + ", mapConcurrencyLevel: " + this.getMapConcurrencyLevel()
        + ", useEvictionThread: " + this.isUseEvictionThread();
  }

  public long getMaxSize() {
    return conf.getMaxSize(type);
  }

  public long getBlockSize() {
    return conf.getBlockSize();
  }
}
9,687
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/BlockCacheConfiguration.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.cache.impl; import java.util.Collections; import java.util.HashMap; import java.util.Map; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.spi.cache.BlockCacheManager.Configuration; import org.apache.accumulo.core.spi.cache.CacheType; public class BlockCacheConfiguration implements Configuration { /** Approximate block size */ private final long blockSize; private final Property serverPrefix; private final Map<String,String> genProps; private final long indexMaxSize; private final long dataMaxSize; private final long summaryMaxSize; public static BlockCacheConfiguration forTabletServer(AccumuloConfiguration conf) { return new BlockCacheConfiguration(conf, Property.TSERV_PREFIX, Property.TSERV_INDEXCACHE_SIZE, Property.TSERV_DATACACHE_SIZE, Property.TSERV_SUMMARYCACHE_SIZE, Property.TSERV_DEFAULT_BLOCKSIZE); } public static BlockCacheConfiguration forScanServer(AccumuloConfiguration conf) { return new BlockCacheConfiguration(conf, Property.SSERV_PREFIX, Property.SSERV_INDEXCACHE_SIZE, Property.SSERV_DATACACHE_SIZE, Property.SSERV_SUMMARYCACHE_SIZE, 
Property.SSERV_DEFAULT_BLOCKSIZE); } private BlockCacheConfiguration(AccumuloConfiguration conf, Property serverPrefix, Property indexCacheSizeProperty, Property dataCacheSizeProperty, Property summaryCacheSizeProperty, Property defaultBlockSizeProperty) { this.serverPrefix = serverPrefix; this.genProps = conf.getAllPropertiesWithPrefix(serverPrefix); this.indexMaxSize = conf.getAsBytes(indexCacheSizeProperty); this.dataMaxSize = conf.getAsBytes(dataCacheSizeProperty); this.summaryMaxSize = conf.getAsBytes(summaryCacheSizeProperty); this.blockSize = conf.getAsBytes(defaultBlockSizeProperty); } @Override public long getMaxSize(CacheType type) { switch (type) { case INDEX: return indexMaxSize; case DATA: return dataMaxSize; case SUMMARY: return summaryMaxSize; default: throw new IllegalArgumentException("Unknown block cache type"); } } @Override public long getBlockSize() { return this.blockSize; } @Override public String toString() { return "indexMaxSize: " + indexMaxSize + "dataMaxSize: " + dataMaxSize + "summaryMaxSize: " + summaryMaxSize + ", blockSize: " + getBlockSize(); } @Override public Map<String,String> getProperties(String prefix, CacheType type) { HashMap<String,String> props = new HashMap<>(); // get default props first String defaultPrefix = getFullyQualifiedPropertyPrefix(serverPrefix, prefix); genProps.forEach((k, v) -> { if (k.startsWith(defaultPrefix)) { props.put(k.substring(defaultPrefix.length()), v); } }); String typePrefix = getFullyQualifiedPropertyPrefix(serverPrefix, prefix, type); genProps.forEach((k, v) -> { if (k.startsWith(typePrefix)) { props.put(k.substring(typePrefix.length()), v); } }); return Collections.unmodifiableMap(props); } public static String getFullyQualifiedPropertyPrefix(Property serverPrefix, String prefix) { return getCachePropertyBase(serverPrefix) + prefix + ".default."; } public static String getFullyQualifiedPropertyPrefix(Property serverPrefix, String prefix, CacheType type) { return 
getCachePropertyBase(serverPrefix) + prefix + "." + type.name().toLowerCase() + "."; } public static String getCachePropertyBase(Property serverPrefix) { return serverPrefix.getKey() + "cache.config."; } }
9,688
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/SizeConstants.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.impl;

/**
 * Sizes, in bytes, of the Java primitive types used by the block cache size accounting.
 */
public class SizeConstants {

  /**
   * Size of boolean in bytes (accounted as a single byte)
   */
  public static final int SIZEOF_BOOLEAN = Byte.BYTES;

  /**
   * Size of float in bytes
   */
  public static final int SIZEOF_FLOAT = Float.BYTES;

  /**
   * Size of int in bytes
   */
  public static final int SIZEOF_INT = Integer.BYTES;

  /**
   * Size of long in bytes
   */
  public static final int SIZEOF_LONG = Long.BYTES;

  private SizeConstants() {
    // constants-only utility class; not meant to be instantiated
  }
}
9,689
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/BlockCacheManagerFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.cache.impl; import org.apache.accumulo.core.classloader.ClassLoaderUtil; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.spi.cache.BlockCacheManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class BlockCacheManagerFactory { private static final Logger LOG = LoggerFactory.getLogger(BlockCacheManager.class); /** * Get the BlockCacheFactory specified by the property 'tserver.cache.factory.class' using the * System class loader * * @param conf accumulo configuration * @return block cache manager instance * @throws ReflectiveOperationException error loading block cache manager implementation class */ public static synchronized BlockCacheManager getInstance(AccumuloConfiguration conf) throws ReflectiveOperationException { String impl = conf.get(Property.TSERV_CACHE_MANAGER_IMPL); Class<? 
extends BlockCacheManager> clazz = ClassLoaderUtil.loadClass(impl, BlockCacheManager.class); LOG.info("Created new block cache manager of type: {}", clazz.getSimpleName()); return clazz.getDeclaredConstructor().newInstance(); } /** * Get the BlockCacheFactory specified by the property 'tserver.cache.factory.class' * * @param conf accumulo configuration * @return block cache manager instance * @throws ReflectiveOperationException error loading block cache manager implementation class */ public static synchronized BlockCacheManager getClientInstance(AccumuloConfiguration conf) throws ReflectiveOperationException { String impl = conf.get(Property.TSERV_CACHE_MANAGER_IMPL); Class<? extends BlockCacheManager> clazz = Class.forName(impl).asSubclass(BlockCacheManager.class); LOG.info("Created new block cache factory of type: {}", clazz.getSimpleName()); return clazz.getDeclaredConstructor().newInstance(); } }
9,690
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/ClassSize.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.cache.impl; /** * Class for determining the "size" of a class, an attempt to calculate the actual bytes that an * object of this class will occupy in memory * * The core of this class is taken from the Derby project */ public class ClassSize { /** Array overhead */ public static final int ARRAY; /** Overhead for ByteBuffer */ public static final int BYTE_BUFFER; /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */ public static final int OBJECT; /** Reference size is 8 bytes on 64-bit, 4 bytes on 32-bit */ public static final int REFERENCE; /** String overhead */ public static final int STRING; /** Overhead for ConcurrentHashMap */ public static final int CONCURRENT_HASHMAP; /** Overhead for ConcurrentHashMap.Entry */ public static final int CONCURRENT_HASHMAP_ENTRY; /** Overhead for ConcurrentHashMap.Segment */ public static final int CONCURRENT_HASHMAP_SEGMENT; /** Overhead for AtomicInteger */ public static final int ATOMIC_INTEGER; private static final String THIRTY_TWO = "32"; /** * Method for reading the arc settings and setting overheads according to 32-bit or 64-bit * architecture. 
*/ static { // Figure out whether this is a 32 or 64 bit machine. String arcModel = System.getProperty("sun.arch.data.model"); // Default value is set to 8, covering the case when arcModel is unknown REFERENCE = arcModel.equals(THIRTY_TWO) ? 4 : 8; OBJECT = 2 * REFERENCE; ARRAY = 3 * REFERENCE; BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) + (5 * SizeConstants.SIZEOF_INT) + (3 * SizeConstants.SIZEOF_BOOLEAN) + SizeConstants.SIZEOF_LONG); STRING = align(OBJECT + ARRAY + REFERENCE + 3 * SizeConstants.SIZEOF_INT); CONCURRENT_HASHMAP = align((2 * SizeConstants.SIZEOF_INT) + ARRAY + (6 * REFERENCE) + OBJECT); CONCURRENT_HASHMAP_ENTRY = align(REFERENCE + OBJECT + (3 * REFERENCE) + (2 * SizeConstants.SIZEOF_INT)); CONCURRENT_HASHMAP_SEGMENT = align( REFERENCE + OBJECT + (3 * SizeConstants.SIZEOF_INT) + SizeConstants.SIZEOF_FLOAT + ARRAY); ATOMIC_INTEGER = align(OBJECT + SizeConstants.SIZEOF_INT); } /** * Aligns a number to 8. * * @param num number to align to 8 * @return smallest number &gt;= input that is a multiple of 8 */ public static int align(int num) { return (int) align((long) num); } /** * Aligns a number to 8. * * @param num number to align to 8 * @return smallest number &gt;= input that is a multiple of 8 */ public static long align(long num) { // The 7 comes from that the alignSize is 8 which is the number of bytes // stored and sent together return ((num + 7) >> 3) << 3; } }
9,691
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/tinylfu/TinyLfuBlockCacheManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.file.blockfile.cache.tinylfu; import org.apache.accumulo.core.spi.cache.BlockCacheManager; import org.apache.accumulo.core.spi.cache.CacheType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class TinyLfuBlockCacheManager extends BlockCacheManager { private static final Logger LOG = LoggerFactory.getLogger(TinyLfuBlockCacheManager.class); @Override protected TinyLfuBlockCache createCache(Configuration conf, CacheType type) { LOG.info("Creating {} cache with configuration {}", type, conf); return new TinyLfuBlockCache(conf, type); } }
9,692
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/tinylfu/TinyLfuBlockCache.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.file.blockfile.cache.tinylfu;

import static java.util.concurrent.TimeUnit.SECONDS;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.function.Supplier;

import org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize;
import org.apache.accumulo.core.file.blockfile.cache.impl.SizeConstants;
import org.apache.accumulo.core.spi.cache.BlockCache;
import org.apache.accumulo.core.spi.cache.BlockCacheManager.Configuration;
import org.apache.accumulo.core.spi.cache.CacheEntry;
import org.apache.accumulo.core.spi.cache.CacheEntry.Weighable;
import org.apache.accumulo.core.spi.cache.CacheType;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.Policy;
import com.github.benmanes.caffeine.cache.stats.CacheStats;

/**
 * A block cache that is memory bounded using the W-TinyLFU eviction algorithm. This implementation
 * delegates to a Caffeine cache to provide concurrent O(1) read and write operations.
 * <ul>
 * <li><a href="https://arxiv.org/pdf/1512.00727.pdf">W-TinyLFU</a></li>
 * <li><a href="https://github.com/ben-manes/caffeine">Caffeine</a></li>
 * <li><a href="https://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html">Cache
 * design</a></li>
 * </ul>
 */
public final class TinyLfuBlockCache implements BlockCache {

  private static final Logger log = LoggerFactory.getLogger(TinyLfuBlockCache.class);

  // How often cache size/hit-rate statistics are logged, in seconds.
  private static final int STATS_PERIOD_SEC = 60;

  // Caffeine cache keyed by block name; weights entries by key + payload size so the
  // maximumWeight bound is in bytes.
  private final Cache<String,Block> cache;

  // Handle on the cache's eviction policy; used to read the weight bound and current size.
  private final Policy.Eviction<String,Block> policy;

  // Weight ceiling clamped to int; also passed to Loader.load as the maximum block size.
  private final int maxSize;

  // Single daemon thread that periodically logs cache statistics.
  private final ScheduledExecutorService statsExecutor = ThreadPools.getServerThreadPools()
      .createScheduledExecutorService(1, "TinyLfuBlockCacheStatsExecutor", true);

  public TinyLfuBlockCache(Configuration conf, CacheType type) {
    // Initial capacity is sized for maxSize/blockSize entries plus 20% headroom.
    cache = Caffeine.newBuilder()
        .initialCapacity((int) Math.ceil(1.2 * conf.getMaxSize(type) / conf.getBlockSize()))
        .weigher((String blockName, Block block) -> {
          // Entry weight accounts for the key string as well as the cached block.
          int keyWeight = ClassSize.align(blockName.length()) + ClassSize.STRING;
          return keyWeight + block.weight();
        }).maximumWeight(conf.getMaxSize(type)).recordStats().build();
    // eviction() is present because maximumWeight was configured above.
    policy = cache.policy().eviction().orElseThrow();
    maxSize = (int) Math.min(Integer.MAX_VALUE, policy.getMaximum());
    ScheduledFuture<?> future = statsExecutor.scheduleAtFixedRate(this::logStats, STATS_PERIOD_SEC,
        STATS_PERIOD_SEC, SECONDS);
    ThreadPools.watchNonCriticalScheduledTask(future);
  }

  @Override
  public long getMaxHeapSize() {
    return getMaxSize();
  }

  @Override
  public long getMaxSize() {
    return maxSize;
  }

  /** Returns the cached entry for blockName, or null on a miss. */
  @Override
  public CacheEntry getBlock(String blockName) {
    return wrap(blockName, cache.getIfPresent(blockName));
  }

  /** Caches (or replaces) the given bytes under blockName and returns the new entry. */
  @Override
  public CacheEntry cacheBlock(String blockName, byte[] buffer) {
    return wrap(blockName, cache.asMap().compute(blockName, (key, block) -> {
      return new Block(buffer);
    }));
  }

  @Override
  public BlockCache.Stats getStats() {
    // Snapshot of Caffeine's stats, adapted to the BlockCache.Stats interface.
    CacheStats stats = cache.stats();
    return new BlockCache.Stats() {
      @Override
      public long hitCount() {
        return stats.hitCount();
      }

      @Override
      public long requestCount() {
        return stats.requestCount();
      }
    };
  }

  // Periodic task: logs current/free/max sizes in MB plus Caffeine's stats line.
  private void logStats() {
    double maxMB = ((double) policy.getMaximum()) / ((double) (1024 * 1024));
    double sizeMB = ((double) policy.weightedSize().getAsLong()) / ((double) (1024 * 1024));
    double freeMB = maxMB - sizeMB;
    log.debug("Cache Size={}MB, Free={}MB, Max={}MB, Blocks={}", sizeMB, freeMB, maxMB,
        cache.estimatedSize());
    log.debug(cache.stats().toString());
  }

  /**
   * Cache value: the raw block bytes plus an optional lazily-built index whose weight is tracked
   * so the entry can be re-weighed when the index grows.
   */
  private static final class Block {

    private final byte[] buffer;
    // Lazily created via getIndex(); guarded by synchronized methods.
    private Weighable index;
    // Last index weight folded into weight(); volatile so weigher threads see updates.
    // Starts at buffer.length / 100 — presumably an initial index-size estimate; TODO confirm.
    private volatile int lastIndexWeight;

    Block(byte[] buffer) {
      this.buffer = buffer;
      this.lastIndexWeight = buffer.length / 100;
    }

    // Approximate heap footprint of this block (index + payload + object overheads).
    int weight() {
      int indexWeight = lastIndexWeight + SizeConstants.SIZEOF_INT + ClassSize.REFERENCE;
      return indexWeight + ClassSize.align(getBuffer().length) + SizeConstants.SIZEOF_LONG
          + ClassSize.REFERENCE + ClassSize.OBJECT + ClassSize.ARRAY;
    }

    public byte[] getBuffer() {
      return buffer;
    }

    // Returns the block's index, creating it on first use. Synchronized so only one index
    // instance is ever created per block.
    @SuppressWarnings("unchecked")
    public synchronized <T extends Weighable> T getIndex(Supplier<T> supplier) {
      if (index == null) {
        index = supplier.get();
      }

      return (T) index;
    }

    // Records the index's current weight; returns true only when it grew, signalling the
    // caller to re-weigh this entry in the cache.
    public synchronized boolean indexWeightChanged() {
      if (index != null) {
        int indexWeight = index.weight();
        if (indexWeight > lastIndexWeight) {
          lastIndexWeight = indexWeight;
          return true;
        }
      }

      return false;
    }
  }

  // Adapts a cached Block to the CacheEntry interface; null blocks map to null entries.
  private CacheEntry wrap(String cacheKey, Block block) {
    if (block != null) {
      return new TlfuCacheEntry(cacheKey, block);
    }

    return null;
  }

  /** CacheEntry view over a Block; re-puts the block when its index weight grows. */
  private class TlfuCacheEntry implements CacheEntry {

    private final String cacheKey;
    private final Block block;

    TlfuCacheEntry(String k, Block b) {
      this.cacheKey = k;
      this.block = b;
    }

    @Override
    public byte[] getBuffer() {
      return block.getBuffer();
    }

    @Override
    public <T extends Weighable> T getIndex(Supplier<T> supplier) {
      return block.getIndex(supplier);
    }

    @Override
    public void indexWeightChanged() {
      if (block.indexWeightChanged()) {
        // update weight
        cache.put(cacheKey, block);
      }
    }
  }

  // Runs the loader with its resolved dependencies; null data means "not loadable" and is not
  // cached.
  private Block load(Loader loader, Map<String,byte[]> resolvedDeps) {
    byte[] data = loader.load(maxSize, resolvedDeps);
    return data == null ? null : new Block(data);
  }

  // Loads every dependency through the cache; returns null if any dependency cannot be loaded.
  // The single-dependency case avoids allocating a HashMap.
  private Map<String,byte[]> resolveDependencies(Map<String,Loader> deps) {
    if (deps.size() == 1) {
      Entry<String,Loader> entry = deps.entrySet().iterator().next();
      CacheEntry ce = getBlock(entry.getKey(), entry.getValue());
      if (ce == null) {
        return null;
      }
      return Collections.singletonMap(entry.getKey(), ce.getBuffer());
    } else {
      HashMap<String,byte[]> resolvedDeps = new HashMap<>();
      for (Entry<String,Loader> entry : deps.entrySet()) {
        CacheEntry ce = getBlock(entry.getKey(), entry.getValue());
        if (ce == null) {
          return null;
        }
        resolvedDeps.put(entry.getKey(), ce.getBuffer());
      }
      return resolvedDeps;
    }
  }

  /**
   * Returns the cached entry for blockName, loading it (and its dependencies) via the loader on a
   * miss. Returns null when the loader or any dependency cannot produce data.
   */
  @Override
  public CacheEntry getBlock(String blockName, Loader loader) {
    Map<String,Loader> deps = loader.getDependencies();
    Block block;
    if (deps.isEmpty()) {
      block = cache.get(blockName, k -> load(loader, Collections.emptyMap()));
    } else {
      // This code path exist to handle the case where dependencies may need to be loaded. Loading
      // dependencies will access the cache. Cache load functions
      // should not access the cache.
      block = cache.getIfPresent(blockName);
      if (block == null) {
        // Load dependencies outside of cache load function.
        Map<String,byte[]> resolvedDeps = resolveDependencies(deps);
        if (resolvedDeps == null) {
          return null;
        }

        // Use asMap because it will not increment stats, getIfPresent recorded a miss above. Use
        // computeIfAbsent because it is possible another thread loaded
        // the data since this thread called getIfPresent.
        block = cache.asMap().computeIfAbsent(blockName, k -> load(loader, resolvedDeps));
      }
    }

    return wrap(blockName, block);
  }
}
9,693
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/TablePermission.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.security; import java.util.ArrayList; import java.util.List; /** * Accumulo table permissions. Each permission has an associated byte ID. */ public enum TablePermission { /* * One may add new permissions, but new permissions must use new numbers. Current numbers in use * must not be changed. */ // CREATE_LOCALITY_GROUP(0), // DROP_LOCALITY_GROUP(1), READ((byte) 2), WRITE((byte) 3), BULK_IMPORT((byte) 4), ALTER_TABLE((byte) 5), GRANT((byte) 6), DROP_TABLE((byte) 7), GET_SUMMARIES((byte) 8); private final byte permID; private static final TablePermission[] mapping = new TablePermission[9]; static { for (TablePermission perm : TablePermission.values()) { mapping[perm.permID] = perm; } } private TablePermission(byte id) { this.permID = id; } /** * Gets the byte ID of this permission. * * @return byte ID */ public byte getId() { return this.permID; } /** * Returns a list of printable permission values. * * @return list of table permission values, as "Table." + permission name */ public static List<String> printableValues() { TablePermission[] a = TablePermission.values(); List<String> list = new ArrayList<>(a.length); for (TablePermission p : a) { list.add("Table." 
+ p); } return list; } /** * Gets the permission matching the given byte ID. * * @param id byte ID * @return table permission * @throws IndexOutOfBoundsException if the byte ID is invalid */ public static TablePermission getPermissionById(byte id) { TablePermission result = mapping[id]; if (result != null) { return result; } throw new IndexOutOfBoundsException("No such permission"); } }
9,694
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/VisibilityEvaluator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.security;

import java.util.ArrayList;

import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.security.ColumnVisibility.Node;

/**
 * A class which evaluates visibility expressions against a set of authorizations.
 */
public class VisibilityEvaluator {
  // The authorizations this evaluator checks expression terms against. Depending on the
  // constructor used, terms are either unescaped before the check or the auths themselves
  // were escaped up front, so both sides compare in the same form.
  private AuthorizationContainer auths;

  /**
   * Authorizations in column visibility expression are in escaped form. Column visibility parsing
   * does not unescape. This class wraps an AuthorizationContainer and unescapes auths before
   * checking the wrapped container.
   */
  private static class UnescapingAuthorizationContainer implements AuthorizationContainer {

    private AuthorizationContainer wrapped;

    UnescapingAuthorizationContainer(AuthorizationContainer wrapee) {
      this.wrapped = wrapee;
    }

    @Override
    public boolean contains(ByteSequence auth) {
      return wrapped.contains(unescape(auth));
    }
  }

  /**
   * Removes backslash escapes from '"' and '\' bytes in an authorization taken from a parsed
   * visibility expression, returning the input unchanged when no escaping is present.
   *
   * @throws IllegalArgumentException if the escape sequences are malformed
   */
  static ByteSequence unescape(ByteSequence auth) {
    // First pass: count bytes involved in escape sequences to size the output array.
    int escapeCharCount = 0;
    for (int i = 0; i < auth.length(); i++) {
      byte b = auth.byteAt(i);
      if (b == '"' || b == '\\') {
        escapeCharCount++;
      }
    }

    if (escapeCharCount > 0) {
      // Each valid escape is a two-byte pair, so an odd count means a dangling escape char.
      if (escapeCharCount % 2 == 1) {
        throw new IllegalArgumentException("Illegal escape sequence in auth : " + auth);
      }

      // Second pass: copy bytes, collapsing each "\x" pair into the single escaped byte.
      byte[] unescapedCopy = new byte[auth.length() - escapeCharCount / 2];
      int pos = 0;
      for (int i = 0; i < auth.length(); i++) {
        byte b = auth.byteAt(i);
        if (b == '\\') {
          i++;
          b = auth.byteAt(i);
          if (b != '"' && b != '\\') {
            throw new IllegalArgumentException("Illegal escape sequence in auth : " + auth);
          }
        } else if (b == '"') {
          // should only see quote after a slash
          throw new IllegalArgumentException("Illegal escape sequence in auth : " + auth);
        }

        unescapedCopy[pos++] = b;
      }

      return new ArrayByteSequence(unescapedCopy);
    } else {
      return auth;
    }
  }

  /**
   * Creates a new {@link Authorizations} object with escaped forms of the authorizations in the
   * given object.
   *
   * @param auths original authorizations
   * @return authorizations object with escaped authorization strings
   * @see #escape(byte[], boolean)
   */
  static Authorizations escape(Authorizations auths) {
    ArrayList<byte[]> retAuths = new ArrayList<>(auths.getAuthorizations().size());

    for (byte[] auth : auths.getAuthorizations()) {
      retAuths.add(escape(auth, false));
    }

    return new Authorizations(retAuths);
  }

  /**
   * Properly escapes an authorization string. The string can be quoted if desired.
   *
   * @param auth authorization string, as UTF-8 encoded bytes
   * @param quote true to wrap escaped authorization in quotes
   * @return escaped authorization string
   */
  public static byte[] escape(byte[] auth, boolean quote) {
    // Count how many bytes need a preceding backslash to size the output array.
    int escapeCount = 0;

    for (byte value : auth) {
      if (value == '"' || value == '\\') {
        escapeCount++;
      }
    }

    if (escapeCount > 0 || quote) {
      // Output holds the original bytes, one backslash per escaped byte, and optional quotes.
      byte[] escapedAuth = new byte[auth.length + escapeCount + (quote ? 2 : 0)];
      int index = quote ? 1 : 0;
      for (byte b : auth) {
        if (b == '"' || b == '\\') {
          escapedAuth[index++] = '\\';
        }
        escapedAuth[index++] = b;
      }

      if (quote) {
        escapedAuth[0] = '"';
        escapedAuth[escapedAuth.length - 1] = '"';
      }

      auth = escapedAuth;
    }
    return auth;
  }

  /**
   * Creates a new evaluator for the authorizations found in the given container.
   *
   * @since 1.7.0
   */
  public VisibilityEvaluator(AuthorizationContainer authsContainer) {
    // Expression terms arrive escaped, so unescape them before consulting the container.
    this.auths = new UnescapingAuthorizationContainer(authsContainer);
  }

  /**
   * Creates a new evaluator for the given collection of authorizations. Each authorization string
   * is escaped before handling, and the original strings are unchanged.
   *
   * @param authorizations authorizations object
   */
  public VisibilityEvaluator(Authorizations authorizations) {
    // Escape the auths instead, so they compare equal to the (escaped) expression terms.
    this.auths = escape(authorizations);
  }

  /**
   * Evaluates the given column visibility against the authorizations provided to this evaluator. A
   * visibility passes evaluation if all authorizations in it are contained in those known to the
   * evaluator, and all AND and OR subexpressions have at least two children.
   *
   * @param visibility column visibility to evaluate
   * @return true if visibility passes evaluation
   * @throws VisibilityParseException if an AND or OR subexpression has less than two children, or a
   *         subexpression is of an unknown type
   */
  public boolean evaluate(ColumnVisibility visibility) throws VisibilityParseException {
    // The VisibilityEvaluator computes a trie from the given Authorizations, that ColumnVisibility
    // expressions can be evaluated against.
    return evaluate(visibility.getExpression(), visibility.getParseTree());
  }

  // Recursively evaluates the parse tree: TERM checks auth membership, AND requires all
  // children to pass, OR requires any child to pass. An empty expression is always visible.
  private final boolean evaluate(final byte[] expression, final Node root)
      throws VisibilityParseException {
    if (expression.length == 0) {
      return true;
    }
    switch (root.type) {
      case TERM:
        return auths.contains(root.getTerm(expression));
      case AND:
        if (root.children == null || root.children.size() < 2) {
          throw new VisibilityParseException("AND has less than 2 children", expression,
              root.start);
        }
        for (Node child : root.children) {
          if (!evaluate(expression, child)) {
            return false;
          }
        }
        return true;
      case OR:
        if (root.children == null || root.children.size() < 2) {
          throw new VisibilityParseException("OR has less than 2 children", expression, root.start);
        }
        for (Node child : root.children) {
          if (evaluate(expression, child)) {
            return true;
          }
        }
        return false;
      default:
        throw new VisibilityParseException("No such node type", expression, root.start);
    }
  }
}
9,695
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/VisibilityParseException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.security;

import static java.nio.charset.StandardCharsets.UTF_8;

import java.text.ParseException;

/**
 * An exception thrown when a visibility string cannot be parsed.
 */
public class VisibilityParseException extends ParseException {
  private static final long serialVersionUID = 1L;

  // Decoded copy of the visibility expression that failed to parse; final so the
  // exception is immutable once constructed (it is only read by getMessage()).
  private final String visibility;

  /**
   * Creates a new exception.
   *
   * @param reason reason string
   * @param visibility visibility that could not be parsed, as UTF-8 encoded bytes
   * @param errorOffset offset into visibility where parsing failed
   */
  public VisibilityParseException(String reason, byte[] visibility, int errorOffset) {
    super(reason, errorOffset);
    this.visibility = new String(visibility, UTF_8);
  }

  /**
   * Returns the base reason augmented with the offending visibility string and the offset at which
   * parsing failed.
   */
  @Override
  public String getMessage() {
    return super.getMessage() + " in string '" + visibility + "' at position "
        + super.getErrorOffset();
  }
}
9,696
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.security;

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;

import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.util.BadArgumentException;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;

/**
 * Validate the column visibility is a valid expression and set the visibility for a Mutation. See
 * {@link ColumnVisibility#ColumnVisibility(byte[])} for the definition of an expression.
 *
 * <p>
 * The expression is a sequence of characters from the set [A-Za-z0-9_-.] along with the binary
 * operators "&amp;" and "|" indicating that both operands are necessary, or the either is
 * necessary. The following are valid expressions for visibility:
 *
 * <pre>
 * A
 * A|B
 * (A|B)&amp;(C|D)
 * orange|(red&amp;yellow)
 * </pre>
 *
 * <p>
 * The following are not valid expressions for visibility:
 *
 * <pre>
 * A|B&amp;C
 * A=B
 * A|B|
 * A&amp;|B
 * ()
 * )
 * dog|!cat
 * </pre>
 *
 * <p>
 * In addition to the base set of visibilities, any character can be used in the expression if it is
 * quoted. If the quoted term contains '&quot;' or '\', then escape the character with '\'. The
 * {@link #quote(String)} method can be used to properly quote and escape terms automatically. The
 * following is an example of a quoted term:
 *
 * <pre>
 * &quot;A#C&quot; &amp; B
 * </pre>
 */
public class ColumnVisibility {

  // Root of the parse tree built from 'expression' by validate(); EMPTY_NODE for an
  // empty expression.
  Node node = null;
  // The raw UTF-8 bytes of the visibility expression; parse-tree nodes hold offsets into it.
  private byte[] expression;

  /**
   * Accessor for the underlying byte string.
   *
   * @return byte array representation of a visibility expression
   */
  public byte[] getExpression() {
    return expression;
  }

  /**
   * The node types in a parse tree for a visibility expression.
   */
  public enum NodeType {
    EMPTY, TERM, OR, AND,
  }

  /**
   * All empty nodes are equal and represent the same value.
   */
  private static final Node EMPTY_NODE = new Node(NodeType.EMPTY, 0);

  /**
   * A node in the parse tree for a visibility expression. Nodes carry [start, end) offsets into
   * the expression bytes rather than copies of the term text.
   */
  public static class Node {
    /**
     * An empty list of nodes.
     */
    public static final List<Node> EMPTY = Collections.emptyList();

    NodeType type;
    int start;
    int end;
    // Shared immutable EMPTY sentinel until the first child is added (see add()).
    List<Node> children = EMPTY;

    public Node(NodeType type, int start) {
      this.type = type;
      this.start = start;
      this.end = start + 1;
    }

    public Node(int start, int end) {
      this.type = NodeType.TERM;
      this.start = start;
      this.end = end;
    }

    public void add(Node child) {
      // Lazily swap the shared EMPTY sentinel for a real list on first add.
      if (children == EMPTY) {
        children = new ArrayList<>();
      }

      children.add(child);
    }

    public NodeType getType() {
      return type;
    }

    public List<Node> getChildren() {
      return children;
    }

    public int getTermStart() {
      return start;
    }

    public int getTermEnd() {
      return end;
    }

    public ByteSequence getTerm(byte[] expression) {
      if (type != NodeType.TERM) {
        throw new IllegalStateException();
      }

      if (expression[start] == '"') {
        // This is a quoted term: strip the surrounding quotes. The bytes returned may
        // still contain backslash escapes.
        int qStart = start + 1;
        int qEnd = end - 1;

        return new ArrayByteSequence(expression, qStart, qEnd - qStart);
      }
      return new ArrayByteSequence(expression, start, end - start);
    }
  }

  /**
   * A node comparator. Nodes sort according to node type, terms sort lexicographically. AND and OR
   * nodes sort by number of children, or if the same by corresponding children.
   */
  public static class NodeComparator implements Comparator<Node>, Serializable {

    private static final long serialVersionUID = 1L;
    // Expression bytes the compared nodes' offsets refer to.
    byte[] text;

    /**
     * Creates a new comparator.
     *
     * @param text expression string, encoded in UTF-8
     */
    public NodeComparator(byte[] text) {
      this.text = text;
    }

    @Override
    public int compare(Node a, Node b) {
      int diff = a.type.ordinal() - b.type.ordinal();
      if (diff != 0) {
        return diff;
      }
      switch (a.type) {
        case EMPTY:
          return 0; // All empty nodes are the same
        case TERM:
          return WritableComparator.compareBytes(text, a.start, a.end - a.start, text, b.start,
              b.end - b.start);
        case OR:
        case AND:
          diff = a.children.size() - b.children.size();
          if (diff != 0) {
            return diff;
          }
          // Same child count: compare children pairwise, in order.
          for (int i = 0; i < a.children.size(); i++) {
            diff = compare(a.children.get(i), b.children.get(i));
            if (diff != 0) {
              return diff;
            }
          }
      }
      return 0;
    }
  }

  /*
   * Convenience method that delegates to normalize with a new NodeComparator constructed using the
   * supplied expression.
   */
  public static Node normalize(Node root, byte[] expression) {
    return normalize(root, expression, new NodeComparator(expression));
  }

  // @formatter:off
  /*
   * Walks an expression's AST in order to:
   *  1) roll up expressions with the same operant (`a&(b&c) becomes a&b&c`)
   *  2) sort labels lexicographically (permutations of `a&b&c` are re-ordered to appear as `a&b&c`)
   *  3) dedupes labels (`a&b&a` becomes `a&b`)
   */
  // @formatter:on
  public static Node normalize(Node root, byte[] expression, NodeComparator comparator) {
    if (root.type != NodeType.TERM) {
      // TreeSet with the node comparator performs the sorting and the deduplication.
      TreeSet<Node> rolledUp = new TreeSet<>(comparator);
      java.util.Iterator<Node> itr = root.children.iterator();
      while (itr.hasNext()) {
        Node c = normalize(itr.next(), expression, comparator);
        if (c.type == root.type) {
          // Same operator as the parent: splice the grandchildren up and drop the child.
          rolledUp.addAll(c.children);
          itr.remove();
        }
        // NOTE(review): when c.type differs from root.type, the normalized node 'c' is
        // discarded here and the original (pre-normalization) child is re-added below via
        // root.children — confirm this relies on normalize mutating children in place.
      }
      rolledUp.addAll(root.children);
      root.children.clear();
      root.children.addAll(rolledUp);

      // need to promote a child if it's an only child
      if (root.children.size() == 1) {
        return root.children.get(0);
      }
    }
    return root;
  }

  /*
   * Walks an expression's AST and appends a string representation to a supplied StringBuilder.
   * This method adds parens where necessary.
   */
  public static void stringify(Node root, byte[] expression, StringBuilder out) {
    if (root.type == NodeType.TERM) {
      out.append(new String(expression, root.start, root.end - root.start, UTF_8));
    } else {
      String sep = "";
      for (Node c : root.children) {
        out.append(sep);
        // Parenthesize a composite child whose operator differs from the parent's.
        boolean parens = (c.type != NodeType.TERM && root.type != c.type);
        if (parens) {
          out.append("(");
        }
        stringify(c, expression, out);
        if (parens) {
          out.append(")");
        }
        sep = root.type == NodeType.AND ? "&" : "|";
      }
    }
  }

  /**
   * Generates a byte[] that represents a normalized, but logically equivalent, form of this
   * evaluator's expression.
   *
   * @return normalized expression in byte[] form
   */
  public byte[] flatten() {
    Node normRoot = normalize(node, expression);
    StringBuilder builder = new StringBuilder(expression.length);
    stringify(normRoot, expression, builder);
    return builder.toString().getBytes(UTF_8);
  }

  // Single-use recursive-descent parser. 'index' is the cursor into the expression bytes and
  // 'parens' tracks open-parenthesis depth; both are shared across the recursive parse_ calls.
  private static class ColumnVisibilityParser {
    private int index = 0;
    private int parens = 0;

    public ColumnVisibilityParser() {}

    Node parse(byte[] expression) {
      if (expression.length > 0) {
        Node node = parse_(expression);
        if (node == null) {
          throw new BadArgumentException("operator or missing parens",
              new String(expression, UTF_8), index - 1);
        }
        if (parens != 0) {
          throw new BadArgumentException("parenthesis mis-match", new String(expression, UTF_8),
              index - 1);
        }
        return node;
      }
      return null;
    }

    // Finishes the subterm spanning [start, end): returns a new TERM node when characters were
    // consumed, otherwise returns the already-parsed 'expr' (e.g. a parenthesized subexpression).
    Node processTerm(int start, int end, Node expr, byte[] expression) {
      if (start != end) {
        if (expr != null) {
          throw new BadArgumentException("expression needs | or &", new String(expression, UTF_8),
              start);
        }
        return new Node(start, end);
      }
      if (expr == null) {
        throw new BadArgumentException("empty term", new String(expression, UTF_8), start);
      }
      return expr;
    }

    // Parses one (possibly parenthesized) subexpression starting at 'index'. Returns on ')' or
    // at end of input. All AND terms or all OR terms at one level collect under a single node;
    // mixing '&' and '|' without parentheses is rejected.
    Node parse_(byte[] expression) {
      Node result = null;
      Node expr = null;
      int wholeTermStart = index;
      int subtermStart = index;
      // True right after a quoted term completes; the next character must be an operator.
      boolean subtermComplete = false;

      while (index < expression.length) {
        switch (expression[index++]) {
          case '&':
            expr = processTerm(subtermStart, index - 1, expr, expression);
            if (result != null) {
              if (!result.type.equals(NodeType.AND)) {
                throw new BadArgumentException("cannot mix & and |",
                    new String(expression, UTF_8), index - 1);
              }
            } else {
              result = new Node(NodeType.AND, wholeTermStart);
            }
            result.add(expr);
            expr = null;
            subtermStart = index;
            subtermComplete = false;
            break;
          case '|':
            expr = processTerm(subtermStart, index - 1, expr, expression);
            if (result != null) {
              if (!result.type.equals(NodeType.OR)) {
                throw new BadArgumentException("cannot mix | and &",
                    new String(expression, UTF_8), index - 1);
              }
            } else {
              result = new Node(NodeType.OR, wholeTermStart);
            }
            result.add(expr);
            expr = null;
            subtermStart = index;
            subtermComplete = false;
            break;
          case '(':
            parens++;
            // '(' is only legal at the start of a subterm, with no pending expression.
            if (subtermStart != index - 1 || expr != null) {
              throw new BadArgumentException("expression needs & or |",
                  new String(expression, UTF_8), index - 1);
            }
            expr = parse_(expression);
            subtermStart = index;
            subtermComplete = false;
            break;
          case ')':
            parens--;
            Node child = processTerm(subtermStart, index - 1, expr, expression);
            if (child == null && result == null) {
              throw new BadArgumentException("empty expression not allowed",
                  new String(expression, UTF_8), index);
            }
            if (result == null) {
              return child;
            }
            if (result.type == child.type) {
              // Same operator: merge the child's children directly instead of nesting.
              for (Node c : child.children) {
                result.add(c);
              }
            } else {
              result.add(child);
            }
            result.end = index - 1;
            return result;
          case '"':
            // A quote is only legal at the start of a subterm.
            if (subtermStart != index - 1) {
              throw new BadArgumentException("expression needs & or |",
                  new String(expression, UTF_8), index - 1);
            }
            // Scan to the closing quote, validating that '\' escapes only '\' or '"'.
            while (index < expression.length && expression[index] != '"') {
              if (expression[index] == '\\') {
                index++;
                if (index == expression.length
                    || (expression[index] != '\\' && expression[index] != '"')) {
                  throw new BadArgumentException("invalid escaping within quotes",
                      new String(expression, UTF_8), index - 1);
                }
              }
              index++;
            }
            if (index == expression.length) {
              throw new BadArgumentException("unclosed quote", new String(expression, UTF_8),
                  subtermStart);
            }
            if (subtermStart + 1 == index) {
              throw new BadArgumentException("empty term", new String(expression, UTF_8),
                  subtermStart);
            }
            index++;
            subtermComplete = true;
            break;
          default:
            if (subtermComplete) {
              throw new BadArgumentException("expression needs & or |",
                  new String(expression, UTF_8), index - 1);
            }
            byte c = expression[index - 1];
            if (!Authorizations.isValidAuthChar(c)) {
              throw new BadArgumentException("bad character (" + c + ")",
                  new String(expression, UTF_8), index - 1);
            }
        }
      }
      // End of input: close out the final subterm and attach it.
      Node child = processTerm(subtermStart, index, expr, expression);
      if (result != null) {
        result.add(child);
        result.end = index;
      } else {
        result = child;
      }
      if (result.type != NodeType.TERM) {
        if (result.children.size() < 2) {
          throw new BadArgumentException("missing term", new String(expression, UTF_8), index);
        }
      }
      return result;
    }
  }

  // Parses the expression (throwing BadArgumentException on malformed input) and stores both the
  // parse tree and the raw bytes. A null or empty expression yields the shared EMPTY_NODE.
  private void validate(byte[] expression) {
    if (expression != null && expression.length > 0) {
      ColumnVisibilityParser p = new ColumnVisibilityParser();
      node = p.parse(expression);
    } else {
      node = EMPTY_NODE;
    }
    this.expression = expression;
  }

  /**
   * Creates an empty visibility. Normally, elements with empty visibility can be seen by everyone.
   * Though, one could change this behavior with filters.
   *
   * @see #ColumnVisibility(String)
   */
  public ColumnVisibility() {
    this(new byte[] {});
  }

  /**
   * Creates a column visibility for a Mutation.
   *
   * @param expression An expression of the rights needed to see this mutation. The expression
   *        syntax is defined at the class-level documentation
   */
  public ColumnVisibility(String expression) {
    this(expression.getBytes(UTF_8));
  }

  /**
   * Creates a column visibility for a Mutation.
   *
   * @param expression visibility expression
   * @see #ColumnVisibility(String)
   */
  public ColumnVisibility(Text expression) {
    this(TextUtil.getBytes(expression));
  }

  /**
   * Creates a column visibility for a Mutation from a string already encoded in UTF-8 bytes.
   *
   * @param expression visibility expression, encoded as UTF-8 bytes
   * @see #ColumnVisibility(String)
   */
  public ColumnVisibility(byte[] expression) {
    validate(expression);
  }

  @Override
  public String toString() {
    return "[" + new String(expression, UTF_8) + "]";
  }

  /**
   * See {@link #equals(ColumnVisibility)}
   */
  @Override
  public boolean equals(Object obj) {
    if (obj instanceof ColumnVisibility) {
      return equals((ColumnVisibility) obj);
    }
    return false;
  }

  /**
   * Compares two ColumnVisibilities for string equivalence, not as a meaningful comparison of
   * terms and conditions.
   *
   * @param otherLe other column visibility
   * @return true if this visibility equals the other via string comparison
   */
  public boolean equals(ColumnVisibility otherLe) {
    return Arrays.equals(expression, otherLe.expression);
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(expression);
  }

  /**
   * Gets the parse tree for this column visibility.
   *
   * @return parse tree node
   */
  public Node getParseTree() {
    return node;
  }

  /**
   * Properly quotes terms in a column visibility expression. If no quoting is needed, then nothing
   * is done.
   *
   * <p>
   * Examples of using quote :
   *
   * <pre>
   * import static org.apache.accumulo.core.security.ColumnVisibility.quote;
   *   .
   *   .
   *   .
   * String s = quote(&quot;A#C&quot;) + &quot;&amp;&quot; + quote(&quot;FOO&quot;);
   * ColumnVisibility cv = new ColumnVisibility(s);
   * </pre>
   *
   * @param term term to quote
   * @return quoted term (unquoted if unnecessary)
   */
  public static String quote(String term) {
    return new String(quote(term.getBytes(UTF_8)), UTF_8);
  }

  /**
   * Properly quotes terms in a column visibility expression. If no quoting is needed, then nothing
   * is done.
   *
   * @param term term to quote, encoded as UTF-8 bytes
   * @return quoted term (unquoted if unnecessary), encoded as UTF-8 bytes
   * @see #quote(String)
   */
  public static byte[] quote(byte[] term) {
    boolean needsQuote = false;

    // Quoting is required only if the term contains a character outside [A-Za-z0-9_-.:/].
    for (byte b : term) {
      if (!Authorizations.isValidAuthChar(b)) {
        needsQuote = true;
        break;
      }
    }

    if (!needsQuote) {
      return term;
    }

    return VisibilityEvaluator.escape(term, true);
  }
}
9,697
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/SystemPermission.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.accumulo.core.security;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

/**
 * Accumulo system permissions. Each permission has an associated byte ID.
 */
public enum SystemPermission {
  /*
   * One may add new permissions, but new permissions must use new numbers. Current numbers in use
   * must not be changed.
   */
  GRANT((byte) 0),
  CREATE_TABLE((byte) 1),
  DROP_TABLE((byte) 2),
  ALTER_TABLE((byte) 3),
  CREATE_USER((byte) 4),
  DROP_USER((byte) 5),
  ALTER_USER((byte) 6),
  SYSTEM((byte) 7),
  CREATE_NAMESPACE((byte) 8),
  DROP_NAMESPACE((byte) 9),
  ALTER_NAMESPACE((byte) 10),
  OBTAIN_DELEGATION_TOKEN((byte) 11);

  // Persistent identifier for this permission; final because it must never change after
  // construction (the numbers are part of the on-disk/wire format).
  private final byte permID;

  // Reverse lookup table from byte ID to permission; final and fully populated once in the
  // static initializer below (matches the final mapping in NamespacePermission).
  private static final HashMap<Byte,SystemPermission> mapping;

  static {
    mapping = new HashMap<>(SystemPermission.values().length);
    for (SystemPermission perm : SystemPermission.values()) {
      mapping.put(perm.permID, perm);
    }
  }

  private SystemPermission(byte id) {
    this.permID = id;
  }

  /**
   * Gets the byte ID of this permission.
   *
   * @return byte ID
   */
  public byte getId() {
    return this.permID;
  }

  /**
   * Returns a list of printable permission values.
   *
   * @return list of system permission values, as "System." + permission name
   */
  public static List<String> printableValues() {
    SystemPermission[] a = SystemPermission.values();
    List<String> list = new ArrayList<>(a.length);

    for (SystemPermission p : a) {
      list.add("System." + p);
    }

    return list;
  }

  /**
   * Gets the permission matching the given byte ID.
   *
   * @param id byte ID
   * @return system permission
   * @throws IndexOutOfBoundsException if the byte ID is invalid
   */
  public static SystemPermission getPermissionById(byte id) {
    if (mapping.containsKey(id)) {
      return mapping.get(id);
    }
    throw new IndexOutOfBoundsException("No such permission");
  }
}
9,698
0
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core
Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/security/NamespacePermission.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.accumulo.core.security; import java.util.ArrayList; import java.util.List; /** * Accumulo namespace permissions. Each permission has an associated byte ID. */ public enum NamespacePermission { // One may add new permissions, but new permissions must use new numbers. Current numbers in use // must not be changed. READ((byte) 0), WRITE((byte) 1), ALTER_NAMESPACE((byte) 2), GRANT((byte) 3), ALTER_TABLE((byte) 4), CREATE_TABLE((byte) 5), DROP_TABLE((byte) 6), BULK_IMPORT((byte) 7), DROP_NAMESPACE((byte) 8); private final byte permID; private static final NamespacePermission[] mapping = new NamespacePermission[9]; static { for (NamespacePermission perm : NamespacePermission.values()) { mapping[perm.permID] = perm; } } private NamespacePermission(byte id) { this.permID = id; } /** * Gets the byte ID of this permission. * * @return byte ID */ public byte getId() { return this.permID; } /** * Returns a list of printable permission values. * * @return list of namespace permission values, as "Namespace." 
+ permission name */ public static List<String> printableValues() { NamespacePermission[] a = NamespacePermission.values(); List<String> list = new ArrayList<>(a.length); for (NamespacePermission p : a) { list.add("Namespace." + p); } return list; } /** * Gets the permission matching the given byte ID. * * @param id byte ID * @return system permission * @throws IndexOutOfBoundsException if the byte ID is invalid */ // This method isn't used anywhere, why is it public API? public static NamespacePermission getPermissionById(byte id) { NamespacePermission result = mapping[id]; if (result != null) { return result; } throw new IndexOutOfBoundsException("No such permission"); } public static NamespacePermission getEquivalent(TablePermission permission) { switch (permission) { case READ: return NamespacePermission.READ; case WRITE: return NamespacePermission.WRITE; case ALTER_TABLE: return NamespacePermission.ALTER_TABLE; case GRANT: return NamespacePermission.GRANT; case DROP_TABLE: return NamespacePermission.DROP_TABLE; case BULK_IMPORT: return NamespacePermission.BULK_IMPORT; default: return null; } } public static NamespacePermission getEquivalent(SystemPermission permission) { switch (permission) { case CREATE_TABLE: return NamespacePermission.CREATE_TABLE; case DROP_TABLE: return NamespacePermission.DROP_TABLE; case ALTER_TABLE: return NamespacePermission.ALTER_TABLE; case ALTER_NAMESPACE: return NamespacePermission.ALTER_NAMESPACE; case DROP_NAMESPACE: return NamespacePermission.DROP_NAMESPACE; case GRANT: return NamespacePermission.ALTER_NAMESPACE; case CREATE_NAMESPACE: case CREATE_USER: case DROP_USER: case ALTER_USER: case SYSTEM: default: return null; } } }
9,699