index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIteratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class WholeColumnFamilyIteratorTest {

  /**
   * Exercises the iterator over rows containing empty row ids, column families, qualifiers,
   * visibilities, and values. First verifies that encoding whole column families and decoding
   * them again round-trips every entry, then verifies that overriding {@code filter} drops
   * exactly the row groups the filter rejects.
   */
  @Test
  public void testEmptyStuff() throws IOException {
    SortedMap<Key,Value> map = new TreeMap<>();
    SortedMap<Key,Value> map2 = new TreeMap<>();
    final Map<Text,Boolean> toInclude = new HashMap<>();
    map.put(new Key(new Text("r1"), new Text("cf1"), new Text("cq1"), new Text("cv1"), 1L),
        new Value("val1"));
    map.put(new Key(new Text("r1"), new Text("cf1"), new Text("cq2"), new Text("cv1"), 2L),
        new Value("val2"));
    map.put(new Key(new Text("r2"), new Text("cf1"), new Text("cq1"), new Text("cv1"), 3L),
        new Value("val3"));
    map.put(new Key(new Text("r2"), new Text("cf2"), new Text("cq1"), new Text("cv1"), 4L),
        new Value("val4"));
    map.put(new Key(new Text("r3"), new Text("cf1"), new Text("cq1"), new Text("cv1"), 5L),
        new Value("val4"));
    map.put(new Key(new Text("r3"), new Text("cf1"), new Text("cq1"), new Text("cv2"), 6L),
        new Value("val4"));
    map.put(new Key(new Text("r4"), new Text("cf1"), new Text("cq1"), new Text("cv1"), 7L),
        new Value(""));
    map.put(new Key(new Text("r4"), new Text("cf1"), new Text("cq1"), new Text(""), 8L),
        new Value("val1"));
    map.put(new Key(new Text("r4"), new Text("cf1"), new Text(""), new Text("cv1"), 9L),
        new Value("val1"));
    map.put(new Key(new Text("r4"), new Text(""), new Text("cq1"), new Text("cv1"), 10L),
        new Value("val1"));
    map.put(new Key(new Text(""), new Text("cf1"), new Text("cq1"), new Text("cv1"), 11L),
        new Value("val1"));
    // alternate the include/exclude decision per row, remembering it in toInclude so
    // every key of the same row gets the same decision; map2 collects the kept entries
    boolean b = true;
    int trueCount = 0;
    for (Key k : map.keySet()) {
      if (toInclude.containsKey(k.getRow())) {
        if (toInclude.get(k.getRow())) {
          map2.put(k, map.get(k));
        }
        continue;
      }
      b = !b;
      toInclude.put(k.getRow(), b);
      if (b) {
        trueCount++;
        map2.put(k, map.get(k));
      }
    }
    SortedMapIterator source = new SortedMapIterator(map);
    WholeColumnFamilyIterator iter = new WholeColumnFamilyIterator(source);
    SortedMap<Key,Value> resultMap = new TreeMap<>();
    iter.seek(new Range(), new ArrayList<>(), false);
    int numRows = 0;
    while (iter.hasTop()) {
      numRows++;
      Key rowKey = iter.getTopKey();
      Value rowValue = iter.getTopValue();
      resultMap.putAll(WholeColumnFamilyIterator.decodeColumnFamily(rowKey, rowValue));
      iter.next();
    }
    // we have 7 groups of row key/cf
    assertEquals(7, numRows);
    // decoding every encoded group must reproduce the original data exactly
    assertEquals(map, resultMap);
    WholeColumnFamilyIterator iter2 = new WholeColumnFamilyIterator(source) {
      @Override
      public boolean filter(Text row, List<Key> keys, List<Value> values) {
        return toInclude.get(row);
      }
    };
    resultMap.clear();
    iter2.seek(new Range(), new ArrayList<>(), false);
    numRows = 0;
    while (iter2.hasTop()) {
      numRows++;
      Key rowKey = iter2.getTopKey();
      Value rowValue = iter2.getTopValue();
      resultMap.putAll(WholeColumnFamilyIterator.decodeColumnFamily(rowKey, rowValue));
      iter2.next();
    }
    // expected value goes first in JUnit assertions (fixed: arguments were swapped)
    assertEquals(trueCount, numRows);
    assertEquals(map2, resultMap);
  }

  /**
   * Puts a single key/value pair built from the given components into {@code map}.
   */
  private void pkv(SortedMap<Key,Value> map, String row, String cf, String cq, String cv, long ts,
      String val) {
    map.put(new Key(new Text(row), new Text(cf), new Text(cq), new Text(cv), ts), new Value(val));
  }

  /**
   * Verifies that a scan can be resumed from the last key returned by the iterator, which is
   * how client and server code continue an interrupted whole-row scan.
   */
  @Test
  public void testContinue() throws Exception {
    SortedMap<Key,Value> map1 = new TreeMap<>();
    pkv(map1, "row1", "cf1", "cq1", "cv1", 5, "foo");
    pkv(map1, "row1", "cf1", "cq2", "cv1", 6, "bar");
    SortedMap<Key,Value> map2 = new TreeMap<>();
    pkv(map2, "row2", "cf1", "cq1", "cv1", 5, "foo");
    pkv(map2, "row2", "cf1", "cq2", "cv1", 6, "bar");
    SortedMap<Key,Value> map3 = new TreeMap<>();
    pkv(map3, "row3", "cf1", "cq1", "cv1", 5, "foo");
    pkv(map3, "row3", "cf1", "cq2", "cv1", 6, "bar");
    SortedMap<Key,Value> map = new TreeMap<>();
    map.putAll(map1);
    map.putAll(map2);
    map.putAll(map3);
    SortedMapIterator source = new SortedMapIterator(map);
    WholeColumnFamilyIterator iter = new WholeColumnFamilyIterator(source);
    Range range = new Range(new Text("row1"), true, new Text("row2"), true);
    iter.seek(range, new ArrayList<>(), false);
    assertTrue(iter.hasTop());
    assertEquals(map1,
        WholeColumnFamilyIterator.decodeColumnFamily(iter.getTopKey(), iter.getTopValue()));
    // simulate something continuing using the last key from the iterator
    // this is what client and server code will do
    range = new Range(iter.getTopKey(), false, range.getEndKey(), range.isEndKeyInclusive());
    iter.seek(range, new ArrayList<>(), false);
    assertTrue(iter.hasTop());
    assertEquals(map2,
        WholeColumnFamilyIterator.decodeColumnFamily(iter.getTopKey(), iter.getTopValue()));
    iter.next();
    assertFalse(iter.hasTop());
  }

  /**
   * Regression test: when the underlying source is clipped to end at row1 (via MultiIterator),
   * resuming after row1's encoded key must yield nothing rather than a partial row.
   */
  @Test
  public void testBug1() throws Exception {
    SortedMap<Key,Value> map1 = new TreeMap<>();
    pkv(map1, "row1", "cf1", "cq1", "cv1", 5, "foo");
    pkv(map1, "row1", "cf1", "cq2", "cv1", 6, "bar");
    SortedMap<Key,Value> map2 = new TreeMap<>();
    pkv(map2, "row2", "cf1", "cq1", "cv1", 5, "foo");
    SortedMap<Key,Value> map = new TreeMap<>();
    map.putAll(map1);
    map.putAll(map2);
    // the source iterator only exposes data up to and including row1
    MultiIterator source = new MultiIterator(Collections.singletonList(new SortedMapIterator(map)),
        new Range(null, true, new Text("row1"), true));
    WholeColumnFamilyIterator iter = new WholeColumnFamilyIterator(source);
    Range range = new Range(new Text("row1"), true, new Text("row2"), true);
    iter.seek(range, new ArrayList<>(), false);
    assertTrue(iter.hasTop());
    assertEquals(map1,
        WholeColumnFamilyIterator.decodeColumnFamily(iter.getTopKey(), iter.getTopValue()));
    // simulate something continuing using the last key from the iterator
    // this is what client and server code will do
    range = new Range(iter.getTopKey(), false, range.getEndKey(), range.isEndKeyInclusive());
    iter.seek(range, new ArrayList<>(), false);
    assertFalse(iter.hasTop());
  }
}
| 9,500 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/RowFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.TreeMap;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link RowFilter}, which accepts or rejects entire rows based on an iterator
 * scoped to a single row's key/value pairs.
 */
public class RowFilterTest {
  /**
   * Accepts a row only if its integer values sum to 2. It also seeks the supplied row
   * iterator to ranges strictly before and strictly after the row's first key, summing what
   * it sees; that second sum must stay 0, proving that seeks on the row iterator are
   * confined to the current row.
   */
  public static class SummingRowFilter extends RowFilter {
    @Override
    public boolean acceptRow(SortedKeyValueIterator<Key,Value> rowIterator) throws IOException {
      int sum = 0;
      int sum2 = 0;
      Key firstKey = null;
      if (rowIterator.hasTop()) {
        firstKey = new Key(rowIterator.getTopKey());
      }
      while (rowIterator.hasTop()) {
        sum += Integer.parseInt(rowIterator.getTopValue().toString());
        rowIterator.next();
      }
      // ensure that seeks are confined to the row
      rowIterator.seek(new Range(null, false, firstKey == null ? null : firstKey.getRow(), false),
          Set.of(), false);
      while (rowIterator.hasTop()) {
        sum2 += Integer.parseInt(rowIterator.getTopValue().toString());
        rowIterator.next();
      }
      rowIterator.seek(new Range(firstKey == null ? null : firstKey.getRow(), false, null, true),
          Set.of(), false);
      while (rowIterator.hasTop()) {
        sum2 += Integer.parseInt(rowIterator.getTopValue().toString());
        rowIterator.next();
      }
      return sum == 2 && sum2 == 0;
    }
  }
  /** Accepts only rows whose row id is "0" or "1". */
  public static class RowZeroOrOneFilter extends RowFilter {
    private static final Set<String> passRows = Set.of("0", "1");
    @Override
    public boolean acceptRow(SortedKeyValueIterator<Key,Value> rowIterator) {
      return rowIterator.hasTop() && passRows.contains(rowIterator.getTopKey().getRow().toString());
    }
  }
  /** Accepts only rows whose row id is "1" or "2". */
  public static class RowOneOrTwoFilter extends RowFilter {
    private static final Set<String> passRows = Set.of("1", "2");
    @Override
    public boolean acceptRow(SortedKeyValueIterator<Key,Value> rowIterator) {
      return rowIterator.hasTop() && passRows.contains(rowIterator.getTopKey().getRow().toString());
    }
  }
  /** Accepts every row; used to test chaining without any filtering effect. */
  public static class TrueFilter extends RowFilter {
    @Override
    public boolean acceptRow(SortedKeyValueIterator<Key,Value> rowIterator) {
      return true;
    }
  }
  /**
   * Builds the fixture: five rows ("0" through "4") with varying column counts and values.
   * Only rows "2" and "3" have values summing to 2 (the SummingRowFilter criterion).
   */
  public List<Mutation> createMutations() {
    List<Mutation> mutations = new LinkedList<>();
    Mutation m = new Mutation("0");
    m.put("cf1", "cq1", "1");
    m.put("cf1", "cq2", "1");
    m.put("cf1", "cq3", "1");
    m.put("cf1", "cq4", "1");
    m.put("cf1", "cq5", "1");
    m.put("cf1", "cq6", "1");
    m.put("cf1", "cq7", "1");
    m.put("cf1", "cq8", "1");
    m.put("cf1", "cq9", "1");
    m.put("cf2", "cq1", "1");
    m.put("cf2", "cq2", "1");
    mutations.add(m);
    m = new Mutation("1");
    m.put("cf1", "cq1", "1");
    m.put("cf2", "cq2", "2");
    mutations.add(m);
    m = new Mutation("2");
    m.put("cf1", "cq1", "1");
    m.put("cf1", "cq2", "1");
    mutations.add(m);
    m = new Mutation("3");
    m.put("cf1", "cq1", "0");
    m.put("cf2", "cq2", "2");
    mutations.add(m);
    m = new Mutation("4");
    m.put("cf1", "cq1", "1");
    m.put("cf1", "cq2", "1");
    m.put("cf1", "cq3", "1");
    m.put("cf1", "cq4", "1");
    m.put("cf1", "cq5", "1");
    m.put("cf1", "cq6", "1");
    m.put("cf1", "cq7", "1");
    m.put("cf1", "cq8", "1");
    m.put("cf1", "cq9", "1");
    m.put("cf2", "cq1", "1");
    m.put("cf2", "cq2", "1");
    mutations.add(m);
    return mutations;
  }
  /**
   * Flattens the fixture mutations into a sorted key/value map suitable for a
   * SortedMapIterator source.
   */
  public TreeMap<Key,Value> createKeyValues() {
    List<Mutation> mutations = createMutations();
    TreeMap<Key,Value> keyValues = new TreeMap<>();
    final Text cf = new Text(), cq = new Text();
    for (Mutation m : mutations) {
      final Text row = new Text(m.getRow());
      for (ColumnUpdate update : m.getUpdates()) {
        cf.set(update.getColumnFamily());
        cq.set(update.getColumnQualifier());
        Key k = new Key(row, cf, cq);
        Value v = new Value(update.getValue());
        keyValues.put(k, v);
      }
    }
    return keyValues;
  }
  /**
   * Runs SummingRowFilter over the fixture with various ranges and column-family
   * restrictions, checking which rows pass.
   */
  @Test
  public void test1() throws Exception {
    ColumnFamilySkippingIterator source =
        new ColumnFamilySkippingIterator(new SortedMapIterator(createKeyValues()));
    RowFilter filter = new SummingRowFilter();
    filter.init(source, Collections.emptyMap(), new DefaultIteratorEnvironment());
    filter.seek(new Range(), Collections.emptySet(), false);
    assertEquals(Set.of("2", "3"), getRows(filter));
    // excluding cf2 changes which rows sum to 2
    ByteSequence cf = new ArrayByteSequence("cf2");
    filter.seek(new Range(), Set.of(cf), true);
    assertEquals(Set.of("1", "3", "0", "4"), getRows(filter));
    filter.seek(new Range("0", "4"), Collections.emptySet(), false);
    assertEquals(Set.of("2", "3"), getRows(filter));
    filter.seek(new Range("2"), Collections.emptySet(), false);
    assertEquals(Set.of("2"), getRows(filter));
    filter.seek(new Range("4"), Collections.emptySet(), false);
    assertEquals(Set.of(), getRows(filter));
    filter.seek(new Range("4"), Set.of(cf), true);
    assertEquals(Set.of("4"), getRows(filter));
  }
  /** Two stacked TrueFilters should pass every row through unchanged. */
  @Test
  public void testChainedRowFilters() throws Exception {
    SortedMapIterator source = new SortedMapIterator(createKeyValues());
    RowFilter filter0 = new TrueFilter();
    filter0.init(source, Collections.emptyMap(), new DefaultIteratorEnvironment());
    RowFilter filter = new TrueFilter();
    filter.init(filter0, Collections.emptyMap(), new DefaultIteratorEnvironment());
    filter.seek(new Range(), Collections.emptySet(), false);
    assertEquals(Set.of("0", "1", "2", "3", "4"), getRows(filter));
  }
  /** Stacked filters act as a conjunction: {0,1} intersected with {1,2} leaves only "1". */
  @Test
  public void testFilterConjunction() throws Exception {
    SortedMapIterator source = new SortedMapIterator(createKeyValues());
    RowFilter filter0 = new RowZeroOrOneFilter();
    filter0.init(source, Collections.emptyMap(), new DefaultIteratorEnvironment());
    RowFilter filter = new RowOneOrTwoFilter();
    filter.init(filter0, Collections.emptyMap(), new DefaultIteratorEnvironment());
    filter.seek(new Range(), Collections.emptySet(), false);
    assertEquals(Set.of("1"), getRows(filter));
  }
  /**
   * Verifies that deepCopy copies the underlying source as well, so seeking the copy does
   * not disturb the original filter's position.
   */
  @Test
  public void deepCopyCopiesTheSource() throws Exception {
    SortedMapIterator source = new SortedMapIterator(createKeyValues());
    RowFilter filter = new RowZeroOrOneFilter();
    filter.init(source, Collections.emptyMap(), new DefaultIteratorEnvironment());
    filter.seek(new Range(), Collections.emptySet(), false);
    // Save off the first key and value
    Key firstKey = filter.getTopKey();
    Value firstValue = filter.getTopValue();
    // Assert that the row is valid given our filter
    assertEquals("0", firstKey.getRow().toString());
    // Read some extra data, just making sure it's all valid
    Key lastKeyRead = null;
    for (int i = 0; i < 5; i++) {
      filter.next();
      lastKeyRead = filter.getTopKey();
      assertEquals("0", lastKeyRead.getRow().toString());
    }
    // Make a copy of the original RowFilter
    RowFilter copy = (RowFilter) filter.deepCopy(new DefaultIteratorEnvironment());
    // Because it's a copy, we should be able to safely seek this one without affecting the original
    copy.seek(new Range(), Collections.emptySet(), false);
    assertTrue(copy.hasTop(), "deepCopy'ed RowFilter did not have a top key");
    Key firstKeyFromCopy = copy.getTopKey();
    Value firstValueFromCopy = copy.getTopValue();
    // Verify that we got the same first k-v pair we did earlier
    assertEquals(firstKey, firstKeyFromCopy);
    assertEquals(firstValue, firstValueFromCopy);
    filter.next();
    Key finalKeyRead = filter.getTopKey();
    // Make sure we got a Key that was greater than the last Key we read from the original RowFilter
    assertTrue(lastKeyRead.compareTo(finalKeyRead) < 0,
        "Expected next key read to be greater than the previous after deepCopy");
  }
  /** Drains the filter, collecting the distinct row ids of everything it returns. */
  private HashSet<String> getRows(RowFilter filter) throws IOException {
    HashSet<String> rows = new HashSet<>();
    while (filter.hasTop()) {
      rows.add(filter.getTopKey().getRowData().toString());
      filter.next();
    }
    return rows;
  }
}
| 9,501 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/LargeRowFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link LargeRowFilter}, which suppresses rows whose column count exceeds a
 * configured maximum and, during compactions, emits suppression markers instead.
 */
public class LargeRowFilterTest {

  /** Formats a zero-padded row id, e.g. {@code row007}. */
  private String genRow(int r) {
    return String.format("row%03d", r);
  }

  /** Formats a zero-padded column qualifier, e.g. {@code cf007}. */
  private String genCQ(int cq) {
    return String.format("cf%03d", cq);
  }

  /** Adds qualifiers [startCQ, stopCQ) for one row to {@code testData}. */
  private void genRow(TreeMap<Key,Value> testData, int row, int startCQ, int stopCQ) {
    int cq = startCQ;
    while (cq < stopCQ) {
      testData.put(new Key(genRow(row), "cf001", genCQ(cq), 5), new Value("v" + row + "_" + cq));
      cq++;
    }
  }

  /** Fills {@code testData} with rows 1..numRows, where row i contains i columns. */
  private void genTestData(TreeMap<Key,Value> testData, int numRows) {
    for (int i = 1; i <= numRows; i++) {
      genRow(testData, i, 0, i);
    }
  }

  /** Builds a LargeRowFilter over the data with the given column limit and scope. */
  private LargeRowFilter setupIterator(TreeMap<Key,Value> testData, int maxColumns,
      IteratorScope scope) throws IOException {
    SortedMapIterator smi = new SortedMapIterator(testData);
    LargeRowFilter filter = new LargeRowFilter();
    IteratorSetting setting = new IteratorSetting(1, LargeRowFilter.class);
    LargeRowFilter.setMaxColumns(setting, maxColumns);
    filter.init(new ColumnFamilySkippingIterator(smi), setting.getOptions(),
        new RowDeletingIteratorTest.TestIE(scope, false));
    return filter;
  }

  /** Reads every remaining entry out of the filter into a fresh sorted map. */
  private TreeMap<Key,Value> drain(LargeRowFilter filter) throws IOException {
    TreeMap<Key,Value> result = new TreeMap<>();
    while (filter.hasTop()) {
      result.put(filter.getTopKey(), filter.getTopValue());
      filter.next();
    }
    return result;
  }

  /** For each threshold i, only rows with at most i columns should survive a full scan. */
  @Test
  public void testBasic() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();
    genTestData(data, 20);
    for (int threshold = 1; threshold <= 20; threshold++) {
      TreeMap<Key,Value> expected = new TreeMap<>();
      genTestData(expected, threshold);
      LargeRowFilter filter = setupIterator(data, threshold, IteratorScope.scan);
      filter.seek(new Range(), Set.of(), false);
      assertEquals(expected, drain(filter));
    }
  }

  /** Same as testBasic, but seeking to each row individually instead of one full scan. */
  @Test
  public void testSeek() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();
    genTestData(data, 20);
    for (int threshold = 1; threshold <= 20; threshold++) {
      TreeMap<Key,Value> expected = new TreeMap<>();
      genTestData(expected, threshold);
      LargeRowFilter filter = setupIterator(data, threshold, IteratorScope.scan);
      TreeMap<Key,Value> seen = new TreeMap<>();
      // seek to each row... rows that exceed max columns should be filtered
      for (int row = 1; row <= threshold; row++) {
        filter.seek(new Range(genRow(row), genRow(row)), Set.of(), false);
        while (filter.hasTop()) {
          assertEquals(genRow(row), filter.getTopKey().getRow().toString());
          seen.put(filter.getTopKey(), filter.getTopValue());
          filter.next();
        }
      }
      assertEquals(expected, seen);
    }
  }

  /** Seeking into the middle of a row: suppressed rows yield nothing, kept rows their tail. */
  @Test
  public void testSeek2() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();
    genTestData(data, 20);
    LargeRowFilter filter = setupIterator(data, 13, IteratorScope.scan);
    // row 15 exceeds the limit of 13, so a mid-row seek must produce nothing
    filter.seek(new Range(new Key(genRow(15), "cf001", genCQ(4), 5), true,
        new Key(genRow(15)).followingKey(PartialKey.ROW), false), Set.of(), false);
    assertFalse(filter.hasTop());
    // row 10 is under the limit, so a mid-row seek yields the remainder of the row
    filter.seek(new Range(new Key(genRow(10), "cf001", genCQ(4), 5), true,
        new Key(genRow(10)).followingKey(PartialKey.ROW), false), Set.of(), false);
    TreeMap<Key,Value> expected = new TreeMap<>();
    genRow(expected, 10, 4, 10);
    assertEquals(expected, drain(filter));
  }

  /** Compaction writes suppression markers that keep rows hidden even at higher limits. */
  @Test
  public void testCompaction() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();
    genTestData(data, 20);
    LargeRowFilter majcFilter = setupIterator(data, 13, IteratorScope.majc);
    majcFilter.seek(new Range(), Set.of(), false);
    TreeMap<Key,Value> compacted = drain(majcFilter);
    // compacted data should now contain suppression markers;
    // add a column to a row that should remain suppressed
    genRow(compacted, 15, 15, 16);
    // scanning over the data with a higher max-columns limit should not change behavior
    // because there are suppression markers... if there was a bug and data was not
    // suppressed, increasing the threshold would expose it
    LargeRowFilter scanFilter = setupIterator(compacted, 20, IteratorScope.scan);
    scanFilter.seek(new Range(), Set.of(), false);
    // only expect to see 13 rows
    TreeMap<Key,Value> expected = new TreeMap<>();
    genTestData(expected, 13);
    TreeMap<Key,Value> scanned = drain(scanFilter);
    assertEquals(expected.size() + 8, compacted.size());
    assertEquals(expected, scanned);
    // try seeking to the middle of row 15... the row has data and a suppression marker;
    // this seeks past the marker but before the column
    scanFilter.seek(new Range(new Key(genRow(15), "cf001", genCQ(4), 5), true,
        new Key(genRow(15)).followingKey(PartialKey.ROW), false), Set.of(), false);
    assertFalse(scanFilter.hasTop());
    // repeat the seek with an explicit column family restriction
    HashSet<ByteSequence> families = new HashSet<>();
    families.add(new ArrayByteSequence("cf001"));
    scanFilter.seek(new Range(new Key(genRow(15), "cf001", genCQ(4), 5), true,
        new Key(genRow(15)).followingKey(PartialKey.ROW), false), families, true);
    assertFalse(scanFilter.hasTop());
  }

  // unlike the other tests, where once a row is suppressed all subsequent rows are also
  // suppressed, this interleaves oversized rows between rows that must be kept
  @Test
  public void testSuppressInner() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();
    genRow(data, 1, 0, 2);
    genRow(data, 2, 0, 50);
    genRow(data, 3, 0, 15);
    genRow(data, 4, 0, 5);
    TreeMap<Key,Value> expected = new TreeMap<>();
    genRow(expected, 1, 0, 2);
    genRow(expected, 4, 0, 5);
    LargeRowFilter filter = setupIterator(data, 13, IteratorScope.scan);
    filter.seek(new Range(), Set.of(), false);
    assertEquals(expected, drain(filter));
  }
}
| 9,502 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/RowDeletingIteratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.TreeMap;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link RowDeletingIterator}, which hides all columns of a row older than a
 * row-delete marker during scans and propagates the marker during minor compactions.
 */
public class RowDeletingIteratorTest {
  /**
   * Minimal IteratorEnvironment stub exposing only the scope and full-major-compaction
   * flag. Also reused by LargeRowFilterTest.
   */
  public static class TestIE implements IteratorEnvironment {
    private IteratorScope scope;
    private boolean fmc;
    public TestIE(IteratorScope scope, boolean fmc) {
      this.scope = scope;
      this.fmc = fmc;
    }
    @Override
    public IteratorScope getIteratorScope() {
      return scope;
    }
    @Override
    public boolean isFullMajorCompaction() {
      return fmc;
    }
  }
  /** Builds a key from row/cf/cq strings and a timestamp. */
  Key newKey(String row, String cf, String cq, long time) {
    return new Key(new Text(row), new Text(cf), new Text(cq), time);
  }
  /** Puts a key/value pair into the map. */
  void put(TreeMap<Key,Value> tm, String row, String cf, String cq, long time, Value val) {
    tm.put(newKey(row, cf, cq, time), val);
  }
  /** Convenience overload taking the value as a string. */
  void put(TreeMap<Key,Value> tm, String row, String cf, String cq, long time, String val) {
    put(tm, row, cf, cq, time, new Value(val));
  }
  /** Asserts the iterator has a top entry matching the given key components and value. */
  private void testAssertions(RowDeletingIterator rdi, String row, String cf, String cq, long time,
      String val) {
    assertTrue(rdi.hasTop());
    assertEquals(newKey(row, cf, cq, time), rdi.getTopKey());
    assertEquals(val, rdi.getTopValue().toString());
  }
  /**
   * Scan scope: a delete marker on r1 (ts 10) hides all of r1's older columns, from full
   * scans and from seeks landing anywhere inside the row.
   */
  @Test
  public void test1() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE);
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 5, "v1");
    put(tm1, "r2", "cf1", "cq1", 5, "v1");
    RowDeletingIterator rdi = new RowDeletingIterator();
    rdi.init(new SortedMapIterator(tm1), null, new TestIE(IteratorScope.scan, false));
    rdi.seek(new Range(), new ArrayList<>(), false);
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    // seeking anywhere inside the deleted row must still skip to r2
    for (int i = 0; i < 5; i++) {
      rdi.seek(new Range(newKey("r1", "cf1", "cq" + i, 5), null), new ArrayList<>(), false);
      testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    }
    rdi.seek(new Range(newKey("r11", "cf1", "cq1", 5), null), new ArrayList<>(), false);
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    // deleting r2 as well leaves nothing visible
    put(tm1, "r2", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE);
    rdi.seek(new Range(), new ArrayList<>(), false);
    assertFalse(rdi.hasTop());
    for (int i = 0; i < 5; i++) {
      rdi.seek(new Range(newKey("r1", "cf1", "cq" + i, 5), null), new ArrayList<>(), false);
      assertFalse(rdi.hasTop());
    }
    // a row with no delete marker remains visible
    put(tm1, "r0", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(), new ArrayList<>(), false);
    testAssertions(rdi, "r0", "cf1", "cq1", 5, "v1");
    rdi.next();
    assertFalse(rdi.hasTop());
  }
  /**
   * Scan scope: only columns older than the delete marker's timestamp (10) are hidden;
   * newer columns (ts 15) in the same row survive, for scans and for various seeks.
   */
  @Test
  public void test2() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE);
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 15, "v1");
    put(tm1, "r1", "cf1", "cq4", 5, "v1");
    put(tm1, "r1", "cf1", "cq5", 15, "v1");
    put(tm1, "r2", "cf1", "cq1", 5, "v1");
    RowDeletingIterator rdi = new RowDeletingIterator();
    rdi.init(new SortedMapIterator(tm1), null, new TestIE(IteratorScope.scan, false));
    rdi.seek(new Range(), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "cf1", "cq3", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r1", "cf1", "cq5", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(newKey("r1", "cf1", "cq1", 5), null), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "cf1", "cq3", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r1", "cf1", "cq5", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(newKey("r1", "cf1", "cq4", 5), null), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "cf1", "cq5", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(newKey("r1", "cf1", "cq5", 20), null), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "cf1", "cq5", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(newKey("r1", "cf1", "cq9", 20), null), new ArrayList<>(), false);
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
  }
  /**
   * Scan scope with column-family restrictions: row deletion applies regardless of which
   * families the seek selects or excludes.
   */
  @Test
  public void test3() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE);
    put(tm1, "r1", "", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r2", "", "cq1", 5, "v1");
    put(tm1, "r2", "cf1", "cq1", 5, "v1");
    RowDeletingIterator rdi = new RowDeletingIterator();
    rdi.init(new ColumnFamilySkippingIterator(new SortedMapIterator(tm1)), null,
        new TestIE(IteratorScope.scan, false));
    // restrict to cf1 (inclusive)
    HashSet<ByteSequence> cols = new HashSet<>();
    cols.add(new ArrayByteSequence("cf1".getBytes()));
    rdi.seek(new Range(), cols, true);
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    // exclude the empty family
    cols.clear();
    cols.add(new ArrayByteSequence("".getBytes()));
    rdi.seek(new Range(), cols, false);
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    // no restriction: both of r2's families are visible
    cols.clear();
    rdi.seek(new Range(), cols, false);
    testAssertions(rdi, "r2", "", "cq1", 5, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
  }
  /**
   * Minor-compaction scope: the delete marker itself is preserved in the output (so the
   * deletion keeps applying to older files) while still suppressing older columns.
   */
  @Test
  public void test4() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE);
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 15, "v1");
    put(tm1, "r1", "cf1", "cq4", 5, "v1");
    put(tm1, "r2", "cf1", "cq1", 5, "v1");
    RowDeletingIterator rdi = new RowDeletingIterator();
    rdi.init(new SortedMapIterator(tm1), null, new TestIE(IteratorScope.minc, false));
    rdi.seek(new Range(), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE.toString());
    rdi.next();
    testAssertions(rdi, "r1", "cf1", "cq3", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(newKey("r1", "cf1", "cq3", 20), null), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "cf1", "cq3", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
    rdi.seek(new Range(newKey("r1", "", "", 42), null), new ArrayList<>(), false);
    testAssertions(rdi, "r1", "", "", 10, RowDeletingIterator.DELETE_ROW_VALUE.toString());
    rdi.next();
    testAssertions(rdi, "r1", "cf1", "cq3", 15, "v1");
    rdi.next();
    testAssertions(rdi, "r2", "cf1", "cq1", 5, "v1");
  }
}
| 9,503 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/CombinerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.lexicoder.Encoder;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Combiner;
import org.apache.accumulo.core.iterators.Combiner.ValueIterator;
import org.apache.accumulo.core.iterators.CombinerTestUtil;
import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.LongCombiner;
import org.apache.accumulo.core.iterators.LongCombiner.FixedLenEncoder;
import org.apache.accumulo.core.iterators.LongCombiner.StringEncoder;
import org.apache.accumulo.core.iterators.LongCombiner.VarLenEncoder;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.TypedValueCombiner;
import org.apache.accumulo.core.iterators.ValueFormatException;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class CombinerTest {
// Shared empty column-family set passed to seek() calls throughout these tests.
private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
/**
 * Test environment that lets a test control the iterator scope and the
 * full-major-compaction flag seen by a Combiner under test.
 */
static class CombinerIteratorEnvironment extends DefaultIteratorEnvironment {
  // Scope reported to iterators (scan, minc, or majc). Final: set once at construction.
  private final IteratorScope scope;
  // Whether a majc-scope environment reports a full major compaction.
  private final boolean isFullMajc;

  CombinerIteratorEnvironment(IteratorScope scope, boolean isFullMajc) {
    this.scope = scope;
    this.isFullMajc = isFullMajc;
  }

  @Override
  public IteratorScope getIteratorScope() {
    return scope;
  }

  @Override
  public boolean isFullMajorCompaction() {
    return isFullMajc;
  }
}
// Environment used by most tests: scan scope, not a full major compaction.
static final IteratorEnvironment SCAN_IE =
new CombinerIteratorEnvironment(IteratorScope.scan, false);
/** Builds a key like {@link #newKey(int, int, int, long)} and sets its delete flag. */
static Key newKey(int row, int colf, int colq, long ts, boolean deleted) {
  final Key key = newKey(row, colf, colq, ts);
  key.setDeleted(deleted);
  return key;
}
/** Creates a key with zero-padded names ({@code r%03d}/{@code cf%03d}/{@code cq%03d}) at the given timestamp. */
static Key newKey(int row, int colf, int colq, long ts) {
  Text cf = new Text(String.format("cf%03d", colf));
  Text cq = new Text(String.format("cq%03d", colq));
  return new Key(newRow(row), cf, cq, ts);
}
/**
 * Creates a range starting at the generated key (inclusive or not) and unbounded above.
 * Note: despite the name, this returns a {@link Range}, not a row.
 */
static Range newRow(int row, int colf, int colq, long ts, boolean inclusive) {
  Key start = newKey(row, colf, colq, ts);
  return new Range(start, inclusive, null, true);
}
/** Creates an inclusive range starting at the generated key and unbounded above. */
static Range newRow(int row, int colf, int colq, long ts) {
  return newRow(row, colf, colq, ts, true);
}
/** Encodes {@code val} with {@code encoder} and inserts it into {@code tm} under a generated key. */
static <V> void newKeyValue(TreeMap<Key,Value> tm, int row, int colf, int colq, long ts,
    boolean deleted, V val, Encoder<V> encoder) {
  Key key = newKey(row, colf, colq, ts);
  key.setDeleted(deleted);
  Value value = new Value(encoder.encode(val));
  tm.put(key, value);
}
/** Formats a zero-padded row name, e.g. {@code r007}. */
static Text newRow(int row) {
  return new Text(String.format("r%03d", row));
}
/**
 * Column filter "2" matches no column family in the data, so nothing combines
 * and every version is passed through, newest timestamp first.
 */
@Test
public void test1() throws IOException {
Encoder<Long> encoder = LongCombiner.VAR_LEN_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that do not aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, SummingCombiner.Type.VARLEN);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("2")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
// all three versions come back unchanged, ts=3 first
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("4", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 1), ai.getTopKey());
assertEquals("2", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// try seeking
ai.seek(newRow(1, 1, 1, 2), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 1), ai.getTopKey());
assertEquals("2", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// seek after everything
ai.seek(newRow(1, 1, 1, 0), EMPTY_COL_FAMS, false);
assertFalse(ai.hasTop());
}
/**
 * All three versions share row/cf/cq and match the cf001 filter, so they
 * combine (2+3+4=9) under the newest timestamp. Also checks seek semantics
 * relative to the combined entry.
 */
@Test
public void test2() throws IOException {
Encoder<Long> encoder = LongCombiner.VAR_LEN_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, VarLenEncoder.class);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// try seeking to the beginning of a key that aggregates
ai.seek(newRow(1, 1, 1, 3), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// try seeking the middle of a key the aggregates
ai.seek(newRow(1, 1, 1, 2), EMPTY_COL_FAMS, false);
assertFalse(ai.hasTop());
// try seeking to the end of a key the aggregates
ai.seek(newRow(1, 1, 1, 1), EMPTY_COL_FAMS, false);
assertFalse(ai.hasTop());
// try seeking before a key the aggregates
ai.seek(newRow(1, 1, 1, 4), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
}
/**
 * Mixed data: the cf001 entries combine (2+3+4=9) while the cf002 entries do
 * not match the column filter and pass through individually.
 */
@Test
public void test3() throws IOException {
Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
// keys that do not aggregate
newKeyValue(tm1, 2, 2, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 2, 2, 1, 2, false, 3L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, FixedLenEncoder.class.getName());
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 1), ai.getTopKey());
assertEquals("2", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// seek after key that aggregates
ai.seek(newRow(1, 1, 1, 2), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
// seek before key that aggregates
ai.seek(newRow(1, 1, 1, 4), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
}
/**
 * Verifies that deepCopy() produces independent iterators: ai, ai2, and ai3
 * are seeked to different positions and each sees the combined data correctly.
 */
@Test
public void testDeepCopy() throws IOException {
Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
// keys that do not aggregate
newKeyValue(tm1, 2, 2, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 2, 2, 1, 2, false, 3L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, FixedLenEncoder.class.getName());
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
// copies made before any seek; each is positioned independently below
SortedKeyValueIterator<Key,Value> ai2 = ai.deepCopy(null);
SortedKeyValueIterator<Key,Value> ai3 = ai.deepCopy(null);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 1), ai.getTopKey());
assertEquals("2", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// seek after key that aggregates
ai2.seek(newRow(1, 1, 1, 2), EMPTY_COL_FAMS, false);
assertTrue(ai2.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai2.getTopKey());
assertEquals("3", encoder.decode(ai2.getTopValue().get()).toString());
// seek before key that aggregates
ai3.seek(newRow(1, 1, 1, 4), EMPTY_COL_FAMS, false);
assertTrue(ai3.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai3.getTopKey());
assertEquals("9", encoder.decode(ai3.getTopValue().get()).toString());
ai3.next();
assertTrue(ai3.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai3.getTopKey());
assertEquals("3", encoder.decode(ai3.getTopValue().get()).toString());
}
/**
 * First pass: only cf001 combines (2+3+4=9); other families pass through.
 * Second pass: with setCombineAllColumns(true), every column combines, so
 * the cf002 versions collapse to 5 as well.
 */
@Test
public void test4() throws IOException {
Encoder<Long> encoder = LongCombiner.STRING_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that do not aggregate
newKeyValue(tm1, 0, 0, 1, 1, false, 7L, encoder);
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
// keys that do not aggregate
newKeyValue(tm1, 2, 2, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 2, 2, 1, 2, false, 3L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(0, 0, 1, 1), ai.getTopKey());
assertEquals("7", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 1), ai.getTopKey());
assertEquals("2", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// seek test
ai.seek(newRow(0, 0, 1, 0), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
// seek after key that aggregates
ai.seek(newRow(1, 1, 1, 2), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("3", encoder.decode(ai.getTopValue().get()).toString());
// combine all columns
is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
Combiner.setCombineAllColumns(is, true);
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(0, 0, 1, 1), ai.getTopKey());
assertEquals("7", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(2, 2, 1, 2), ai.getTopKey());
assertEquals("5", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
}
/**
 * Combines identical keys coming from three separate sources merged through a
 * MultiIterator (2+3+4=9).
 */
@Test
public void test5() throws IOException {
Encoder<Long> encoder = LongCombiner.STRING_ENCODER;
// try aggregating across multiple data sets that contain
// the exact same keys w/ different values
TreeMap<Key,Value> tm1 = new TreeMap<>();
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
TreeMap<Key,Value> tm2 = new TreeMap<>();
newKeyValue(tm2, 1, 1, 1, 1, false, 3L, encoder);
TreeMap<Key,Value> tm3 = new TreeMap<>();
newKeyValue(tm3, 1, 1, 1, 1, false, 4L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, StringEncoder.class);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
List<SortedKeyValueIterator<Key,Value>> sources = new ArrayList<>(3);
sources.add(new SortedMapIterator(tm1));
sources.add(new SortedMapIterator(tm2));
sources.add(new SortedMapIterator(tm3));
MultiIterator mi = new MultiIterator(sources, true);
ai.init(mi, is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 1), ai.getTopKey());
assertEquals("9", encoder.decode(ai.getTopValue().get()).toString());
}
/**
 * Seeking with a non-inclusive start at the newest version's key skips the
 * entire combined entry, so nothing is returned.
 */
@Test
public void test6() throws IOException {
Encoder<Long> encoder = LongCombiner.VAR_LEN_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 2L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, VarLenEncoder.class.getName());
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
// try seeking to the beginning of a key that aggregates
ai.seek(newRow(1, 1, 1, 3, false), EMPTY_COL_FAMS, false);
assertFalse(ai.hasTop());
}
/**
 * Deletes are not combined: the values above the delete marker sum (4+3=7)
 * while the delete entry itself is returned separately and untouched.
 */
@Test
public void test7() throws IOException {
Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
// test that delete is not aggregated
TreeMap<Key,Value> tm1 = new TreeMap<>();
newKeyValue(tm1, 1, 1, 1, 2, true, 0L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 4L, encoder);
newKeyValue(tm1, 1, 1, 1, 4, false, 3L, encoder);
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, SummingCombiner.Type.FIXEDLEN);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(newRow(1, 1, 1, 4, true), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 4), ai.getTopKey());
assertEquals("7", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 2, true), ai.getTopKey());
assertEquals("0", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// a lone delete entry also passes through untouched
tm1 = new TreeMap<>();
newKeyValue(tm1, 1, 1, 1, 2, true, 0L, encoder);
ai = new SummingCombiner();
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(newRow(1, 1, 1, 4, true), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 2, true), ai.getTopKey());
assertEquals("0", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
}
/** Verifies ValueIterator walks values newest-timestamp-first over a seeked source. */
@Test
public void valueIteratorTest() throws IOException {
  TreeMap<Key,Value> tm = new TreeMap<>();
  tm.put(new Key("r", "f", "q", 1), new Value("1"));
  tm.put(new Key("r", "f", "q", 2), new Value("2"));
  SortedMapIterator smi = new SortedMapIterator(tm);
  smi.seek(new Range(), EMPTY_COL_FAMS, false);
  ValueIterator iter = new ValueIterator(smi);
  // fixed: expected value now comes first per JUnit's assertEquals(expected, actual)
  assertEquals("2", iter.next().toString());
  assertEquals("1", iter.next().toString());
  assertFalse(iter.hasNext());
}
/**
 * Verifies stacked SummingCombiners: the first sums all versions within the
 * {@code count} family; the second is restricted to {@code count:a}.
 */
@Test
public void sumAllColumns() throws IOException {
  TreeMap<Key,Value> tm = new TreeMap<>();
  tm.put(new Key("r", "count", "a", 1), new Value("1"));
  tm.put(new Key("r", "count", "a", 2), new Value("1"));
  tm.put(new Key("r", "count", "b", 3), new Value("1"));
  tm.put(new Key("r", "count", "b", 4), new Value("1"));
  tm.put(new Key("r", "count", "b", 5), new Value("1"));
  tm.put(new Key("r", "count", "c", 6), new Value("1"));
  SortedMapIterator smi = new SortedMapIterator(tm);
  Combiner iter = new SummingCombiner();
  IteratorSetting s = new IteratorSetting(10, "s", SummingCombiner.class);
  SummingCombiner.setColumns(s, Collections.singletonList(new IteratorSetting.Column("count")));
  SummingCombiner.setEncodingType(s, LongCombiner.StringEncoder.class);
  iter.init(smi, s.getOptions(), SCAN_IE);
  Combiner iter2 = new SummingCombiner();
  IteratorSetting s2 = new IteratorSetting(10, "s2", SummingCombiner.class);
  SummingCombiner.setColumns(s2,
      Collections.singletonList(new IteratorSetting.Column("count", "a")));
  SummingCombiner.setEncodingType(s2, LongCombiner.StringEncoder.class);
  // Fix: iter2 was initialized with s.getOptions(), leaving the s2 configuration
  // above entirely unused. The expected output is the same either way.
  iter2.init(iter, s2.getOptions(), SCAN_IE);
  iter2.seek(new Range(), EMPTY_COL_FAMS, false);
  // count:a — two versions of value 1 summed by the first combiner
  assertTrue(iter2.hasTop());
  assertEquals("2", iter2.getTopValue().toString());
  iter2.next();
  // count:b — three versions summed
  assertTrue(iter2.hasTop());
  assertEquals("3", iter2.getTopValue().toString());
  iter2.next();
  // count:c — single version
  assertTrue(iter2.hasTop());
  assertEquals("1", iter2.getTopValue().toString());
  iter2.next();
  assertFalse(iter2.hasTop());
}
/**
 * Over the versions 4, 3, 2 of one key, MaxCombiner yields 4 and MinCombiner
 * yields 2, both under the newest timestamp.
 */
@Test
public void maxMinTest() throws IOException {
Encoder<Long> encoder = LongCombiner.VAR_LEN_ENCODER;
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, 4L, encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, 3L, encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, 2L, encoder);
Combiner ai = new MaxCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
LongCombiner.setEncodingType(is, SummingCombiner.Type.VARLEN);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("4", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
// same options work for MinCombiner since both are LongCombiners
ai = new MinCombiner();
ai.init(new SortedMapIterator(tm1), is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertEquals("2", encoder.decode(ai.getTopValue().get()).toString());
ai.next();
assertFalse(ai.hasTop());
}
/** Builds a mutable list containing the given longs, in order. */
public static List<Long> nal(Long... longs) {
  return new ArrayList<>(Arrays.asList(longs));
}
/** Asserts two byte arrays are equal, checking length first and then each element. */
public static void assertBytesEqual(byte[] expected, byte[] actual) {
  assertEquals(expected.length, actual.length);
  for (int idx = 0; idx < expected.length; idx++) {
    assertEquals(expected[idx], actual[idx]);
  }
}
/**
 * Runs the SummingArrayCombiner over element-wise-summable lists ([1,2]+[3,4,5]+[]
 * = [4,6,5]) four times, configuring the encoding by Type enum, encoder class,
 * and class name; the final two configurations use encoders of the wrong type
 * and must fail init with IllegalArgumentException.
 */
public static void sumArray(Class<? extends Encoder<List<Long>>> encoderClass,
SummingArrayCombiner.Type type) throws IOException, ReflectiveOperationException {
Encoder<List<Long>> encoder = encoderClass.getDeclaredConstructor().newInstance();
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
newKeyValue(tm1, 1, 1, 1, 1, false, nal(1L, 2L), encoder);
newKeyValue(tm1, 1, 1, 1, 2, false, nal(3L, 4L, 5L), encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, nal(), encoder);
Combiner ai = new SummingArrayCombiner();
IteratorSetting is = new IteratorSetting(1, SummingArrayCombiner.class);
// configuration 1: encoding specified via the Type enum
SummingArrayCombiner.setEncodingType(is, type);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
SortedMapIterator sortedMapIterator = new SortedMapIterator(tm1);
ai.init(sortedMapIterator, is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertBytesEqual(encoder.encode(nal(4L, 6L, 5L)), ai.getTopValue().get());
ai.next();
assertFalse(ai.hasTop());
is.clearOptions();
// configuration 2: encoding specified via the encoder Class object
SummingArrayCombiner.setEncodingType(is, encoderClass);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(sortedMapIterator, is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertBytesEqual(encoder.encode(nal(4L, 6L, 5L)), ai.getTopValue().get());
ai.next();
assertFalse(ai.hasTop());
is.clearOptions();
// configuration 3: encoding specified via the encoder class name string
SummingArrayCombiner.setEncodingType(is, encoderClass.getName());
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
ai.init(sortedMapIterator, is.getOptions(), SCAN_IE);
ai.seek(new Range(), EMPTY_COL_FAMS, false);
assertTrue(ai.hasTop());
assertEquals(newKey(1, 1, 1, 3), ai.getTopKey());
assertBytesEqual(encoder.encode(nal(4L, 6L, 5L)), ai.getTopValue().get());
ai.next();
assertFalse(ai.hasTop());
is.clearOptions();
// configuration 4: a Long encoder (not List<Long>) must be rejected at init
SummingArrayCombiner.setEncodingType(is, SummingCombiner.VAR_LEN_ENCODER.getClass().getName());
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
final var isOptions = is.getOptions();
assertThrows(IllegalArgumentException.class,
() -> ai.init(sortedMapIterator, isOptions, SCAN_IE));
is.clearOptions();
// configuration 5: BadEncoder has no usable encoding and must also be rejected
SummingArrayCombiner.setEncodingType(is, BadEncoder.class.getName());
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
final var isOptions1 = is.getOptions();
assertThrows(IllegalArgumentException.class,
() -> ai.init(sortedMapIterator, isOptions1, SCAN_IE));
}
/** Encoder that discards all data; used only to trigger validation failures in sumArray. */
public static class BadEncoder implements Encoder<List<Long>> {
@Override
public byte[] encode(List<Long> v) {
return new byte[0];
}
@Override
public List<Long> decode(byte[] b) {
return new ArrayList<>();
}
}
/** Runs the sumArray scenarios for each of the three array encoder/type pairs. */
@Test
public void sumArrayTest() throws IOException, ReflectiveOperationException {
sumArray(SummingArrayCombiner.VarLongArrayEncoder.class, SummingArrayCombiner.Type.VARLEN);
sumArray(SummingArrayCombiner.FixedLongArrayEncoder.class, SummingArrayCombiner.Type.FIXEDLEN);
sumArray(SummingArrayCombiner.StringArrayEncoder.class, SummingArrayCombiner.Type.STRING);
}
/**
 * Round-trips boundary and ordinary values (MIN/MAX/±42/0) through every
 * scalar and array encoder via TypedValueCombiner.testEncoder.
 */
@Test
public void testEncoders() {
TypedValueCombiner.testEncoder(SummingCombiner.FIXED_LEN_ENCODER, Long.MAX_VALUE);
TypedValueCombiner.testEncoder(SummingCombiner.FIXED_LEN_ENCODER, Long.MIN_VALUE);
TypedValueCombiner.testEncoder(SummingCombiner.FIXED_LEN_ENCODER, 42L);
TypedValueCombiner.testEncoder(SummingCombiner.FIXED_LEN_ENCODER, -42L);
TypedValueCombiner.testEncoder(SummingCombiner.FIXED_LEN_ENCODER, 0L);
TypedValueCombiner.testEncoder(SummingCombiner.VAR_LEN_ENCODER, Long.MAX_VALUE);
TypedValueCombiner.testEncoder(SummingCombiner.VAR_LEN_ENCODER, Long.MIN_VALUE);
TypedValueCombiner.testEncoder(SummingCombiner.VAR_LEN_ENCODER, 42L);
TypedValueCombiner.testEncoder(SummingCombiner.VAR_LEN_ENCODER, -42L);
TypedValueCombiner.testEncoder(SummingCombiner.VAR_LEN_ENCODER, 0L);
TypedValueCombiner.testEncoder(SummingCombiner.STRING_ENCODER, Long.MAX_VALUE);
TypedValueCombiner.testEncoder(SummingCombiner.STRING_ENCODER, Long.MIN_VALUE);
TypedValueCombiner.testEncoder(SummingCombiner.STRING_ENCODER, 42L);
TypedValueCombiner.testEncoder(SummingCombiner.STRING_ENCODER, -42L);
TypedValueCombiner.testEncoder(SummingCombiner.STRING_ENCODER, 0L);
TypedValueCombiner.testEncoder(SummingArrayCombiner.FIXED_LONG_ARRAY_ENCODER,
Arrays.asList(0L, -1L, 10L, Long.MAX_VALUE, Long.MIN_VALUE));
TypedValueCombiner.testEncoder(SummingArrayCombiner.VAR_LONG_ARRAY_ENCODER,
Arrays.asList(0L, -1L, 10L, Long.MAX_VALUE, Long.MIN_VALUE));
TypedValueCombiner.testEncoder(SummingArrayCombiner.STRING_ARRAY_ENCODER,
Arrays.asList(0L, -1L, 10L, Long.MAX_VALUE, Long.MIN_VALUE));
}
/** Verifies LongCombiner.safeAdd clamps at Long.MIN/MAX instead of wrapping. */
@Test
public void testAdds() {
  // fixed: expected value first per JUnit's assertEquals(expected, actual)
  // underflow clamps to MIN_VALUE
  assertEquals(Long.MIN_VALUE, LongCombiner.safeAdd(Long.MIN_VALUE + 5, -10));
  // overflow clamps to MAX_VALUE
  assertEquals(Long.MAX_VALUE, LongCombiner.safeAdd(Long.MAX_VALUE - 5, 10));
  // sums landing exactly on the bounds are preserved
  assertEquals(Long.MIN_VALUE, LongCombiner.safeAdd(Long.MIN_VALUE + 5, -5));
  assertEquals(Long.MAX_VALUE, LongCombiner.safeAdd(Long.MAX_VALUE - 5, 5));
}
/** Seeks the iterator from the start and drains it into a sorted map of copied key/values. */
private TreeMap<Key,Value> readAll(SortedKeyValueIterator<Key,Value> combiner) throws Exception {
  TreeMap<Key,Value> result = new TreeMap<>();
  combiner.seek(new Range(), EMPTY_COL_FAMS, false);
  for (; combiner.hasTop(); combiner.next()) {
    result.put(new Key(combiner.getTopKey()), new Value(combiner.getTopValue()));
  }
  return result;
}
/** Convenience overload: expects no warnings logged and clears the log-message cache first. */
private void runDeleteHandlingTest(TreeMap<Key,Value> input, TreeMap<Key,Value> expected,
Boolean rofco, IteratorEnvironment env) throws Exception {
runDeleteHandlingTest(input, expected, rofco, env, false, true);
}
/**
 * Overload that, when a log message is expected, runs the scenario a second
 * time without clearing the cache to verify the warning is not re-logged.
 */
private void runDeleteHandlingTest(TreeMap<Key,Value> input, TreeMap<Key,Value> expected,
Boolean rofco, IteratorEnvironment env, boolean expectedLog) throws Exception {
runDeleteHandlingTest(input, expected, rofco, env, expectedLog, true);
if (expectedLog) {
// run test again... should not see log message again because cache is not cleared
runDeleteHandlingTest(input, expected, rofco, env, true, false);
}
}
/**
 * Core scenario runner: configures a SummingCombiner on cf001 (optionally with
 * reduceOnFullCompactionOnly), reads the input through it under {@code env},
 * and compares against {@code expected}. If {@code expected} is null... note:
 * deepCopy mode is triggered by expected == null, yet expected is then also
 * asserted against — see the deepCopy flag below.
 *
 * @param rofco value for setReduceOnFullCompactionOnly, or null to leave unset
 * @param expectedLog whether the combiner should have logged >0 warnings
 * @param clearLogMsgCache whether to reset the warning dedup cache first
 */
private void runDeleteHandlingTest(TreeMap<Key,Value> input, TreeMap<Key,Value> expected,
Boolean rofco, IteratorEnvironment env, boolean expectedLog, boolean clearLogMsgCache)
throws Exception {
boolean deepCopy = expected == null;
if (clearLogMsgCache) {
CombinerTestUtil.clearLogCache();
}
Combiner ai = new SummingCombiner();
IteratorSetting is = new IteratorSetting(1, SummingCombiner.class);
SummingCombiner.setEncodingType(is, LongCombiner.StringEncoder.class);
Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf001")));
if (rofco != null) {
Combiner.setReduceOnFullCompactionOnly(is, rofco);
}
ai.init(new SortedMapIterator(input), is.getOptions(), env);
if (deepCopy) {
assertEquals(expected, readAll(ai.deepCopy(env)));
}
assertEquals(expected, readAll(ai));
long logSize = CombinerTestUtil.cacheSize();
if (expectedLog) {
assertTrue(logSize > 0, "Expected >0 log messages, but got : " + logSize);
} else {
assertEquals(0, logSize, "Expected 0 log messages, but got : " + logSize);
}
}
/**
 * Exercises Combiner delete handling across scan, partial-majc, and full-majc
 * environments, with reduceOnFullCompactionOnly true, false, and unset.
 */
@Test
public void testDeleteHandling() throws Exception {
  Encoder<Long> encoder = LongCombiner.STRING_ENCODER;
  TreeMap<Key,Value> input = new TreeMap<>();
  // fixed local-variable typo: "parital" -> "partial"
  IteratorEnvironment partialMajcIe = new CombinerIteratorEnvironment(IteratorScope.majc, false);
  IteratorEnvironment fullMajcIe = new CombinerIteratorEnvironment(IteratorScope.majc, true);
  // keys that aggregate; ts=2 is a delete marker splitting the versions
  newKeyValue(input, 1, 1, 1, 1, false, 4L, encoder);
  newKeyValue(input, 1, 1, 1, 2, true, 0L, encoder);
  newKeyValue(input, 1, 1, 1, 3, false, 2L, encoder);
  newKeyValue(input, 1, 1, 1, 4, false, 9L, encoder);
  // expected when reduced: 2+9=11 above the delete, 4 below, delete untouched
  TreeMap<Key,Value> expected = new TreeMap<>();
  newKeyValue(expected, 1, 1, 1, 1, false, 4L, encoder);
  newKeyValue(expected, 1, 1, 1, 2, true, 0L, encoder);
  newKeyValue(expected, 1, 1, 1, 4, false, 11L, encoder);
  // rofco=true: a partial majc passes data through unreduced; scan and full majc reduce
  runDeleteHandlingTest(input, input, true, partialMajcIe);
  runDeleteHandlingTest(input, expected, true, fullMajcIe);
  runDeleteHandlingTest(input, expected, true, SCAN_IE);
  // rofco=false: reducing on majc in the presence of a delete logs a warning
  runDeleteHandlingTest(input, expected, false, fullMajcIe, true);
  runDeleteHandlingTest(input, expected, false, SCAN_IE);
  runDeleteHandlingTest(input, expected, false, partialMajcIe, true);
  // rofco unset: majc scopes also log the warning
  runDeleteHandlingTest(input, expected, null, partialMajcIe, true);
  runDeleteHandlingTest(input, expected, null, fullMajcIe, true);
}
/**
 * Tests the Lossy option will ignore errors in TypedValueCombiner. Uses SummingArrayCombiner to
 * generate error.
 */
@Test
public void testLossyOption() throws IOException {
Encoder<List<Long>> encoder = new SummingArrayCombiner.VarLongArrayEncoder();
TreeMap<Key,Value> tm1 = new TreeMap<>();
// keys that aggregate
// "badValue" is not a valid VarLongArray encoding and will fail to decode
tm1.put(newKey(1, 1, 1, 1, false), new Value("badValue"));
newKeyValue(tm1, 1, 1, 1, 2, false, nal(3L, 4L, 5L), encoder);
newKeyValue(tm1, 1, 1, 1, 3, false, nal(), encoder);
SummingArrayCombiner summingArrayCombiner = new SummingArrayCombiner();
IteratorSetting iteratorSetting = new IteratorSetting(1, SummingArrayCombiner.class);
SummingArrayCombiner.setEncodingType(iteratorSetting, SummingArrayCombiner.Type.VARLEN);
Combiner.setColumns(iteratorSetting,
Collections.singletonList(new IteratorSetting.Column("cf001")));
// lossy = true so ignore bad value
TypedValueCombiner.setLossyness(iteratorSetting, true);
assertTrue(summingArrayCombiner.validateOptions(iteratorSetting.getOptions()));
summingArrayCombiner.init(new SortedMapIterator(tm1), iteratorSetting.getOptions(), SCAN_IE);
final Range range = new Range();
summingArrayCombiner.seek(range, EMPTY_COL_FAMS, false);
// only the two decodable values are combined: []+[3,4,5] = [3,4,5]
assertTrue(summingArrayCombiner.hasTop());
assertEquals(newKey(1, 1, 1, 3), summingArrayCombiner.getTopKey());
assertBytesEqual(encoder.encode(nal(3L, 4L, 5L)), summingArrayCombiner.getTopValue().get());
summingArrayCombiner.next();
assertFalse(summingArrayCombiner.hasTop());
// lossy = false throw error for bad value
TypedValueCombiner.setLossyness(iteratorSetting, false);
assertTrue(summingArrayCombiner.validateOptions(iteratorSetting.getOptions()));
summingArrayCombiner.init(new SortedMapIterator(tm1), iteratorSetting.getOptions(), SCAN_IE);
assertThrows(ValueFormatException.class,
() -> summingArrayCombiner.seek(range, EMPTY_COL_FAMS, false));
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.accumulo.core.client.lexicoder.Lexicoder;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.ValueFormatException;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
public abstract class TestCfCqSlice {
  // Range covering the entire table; used when a test wants an unrestricted seek.
  private static final Range INFINITY = new Range();
  // Fixed-width decimal encoding keeps byte-wise sort order consistent with numeric order.
  private static final Lexicoder<Long> LONG_LEX = new ReadableLongLexicoder(4);
  // Shared row-id generator; rows are globally unique across createMap() calls.
  private static final AtomicLong ROW_ID_GEN = new AtomicLong();
  // Flip to true for a small 5^3 data cube when debugging; normally 50^3.
  private static final boolean easyThereSparky = false;
  private static final int LR_DIM = easyThereSparky ? 5 : 50;
  private static final Map<String,String> EMPTY_OPTS = Collections.emptyMap();
  private static final Set<ByteSequence> EMPTY_CF_SET = Collections.emptySet();
  // Subclasses supply the concrete slice-filter iterator implementation under test.
  protected abstract Class<? extends SortedKeyValueIterator<Key,Value>> getFilterClass();
  private static TreeMap<Key,Value> data;
  @BeforeAll
  public static void setupData() {
    // Build the full LR_DIM^3 cube of test data once for the whole test class.
    data = createMap(LR_DIM, LR_DIM, LR_DIM);
  }
  @AfterAll
  public static void clearData() {
    // Drop the reference so the (potentially large) map can be garbage collected.
    data = null;
  }
@Test
public void testAllRowsFullSlice() throws Exception {
boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
loadKvs(foundKvs, EMPTY_OPTS, INFINITY);
for (int i = 0; i < LR_DIM; i++) {
for (int j = 0; j < LR_DIM; j++) {
for (int k = 0; k < LR_DIM; k++) {
assertTrue(foundKvs[i][j][k],
"(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
}
}
}
}
  @Test
  public void testSingleRowFullSlice() throws Exception {
    // No slice options, but the seek range is restricted to one row: everything
    // in that row must be returned and nothing from any other row.
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    int rowId = LR_DIM / 2;
    loadKvs(foundKvs, EMPTY_OPTS, Range.exact(new Text(LONG_LEX.encode((long) rowId))));
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (rowId == i) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
  @Test
  public void testAllRowsSlice() throws Exception {
    // Inclusive cf slice [20,25] and cq slice [30,35] applied across all rows.
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCf = 20;
    long sliceMinCq = 30;
    long sliceMaxCf = 25;
    long sliceMaxCq = 35;
    assertTrue(sliceMinCf < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMinCq < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMaxCf < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMaxCq < LR_DIM, "slice param must be less than LR_DIM");
    Map<String,String> opts = new HashMap<>();
    opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    loadKvs(foundKvs, opts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          // Both cf and cq must fall inside their (inclusive) slices.
          if (j >= sliceMinCf && j <= sliceMaxCf && k >= sliceMinCq && k <= sliceMaxCq) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
  @Test
  public void testSingleColumnSlice() throws Exception {
    // Degenerate slice where min == max on both cf and cq: exactly one
    // (cf, cq) column survives in every row.
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCf = 20;
    long sliceMinCq = 20;
    long sliceMaxCf = 20;
    long sliceMaxCq = 20;
    Map<String,String> opts = new HashMap<>();
    opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    loadKvs(foundKvs, opts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (j == sliceMinCf && k == sliceMinCq) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
@Test
public void testSingleColumnSliceByExclude() throws Exception {
boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
long sliceMinCf = 20;
long sliceMinCq = 20;
long sliceMaxCf = 22;
long sliceMaxCq = 22;
Map<String,String> opts = new HashMap<>();
opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
opts.put(CfCqSliceOpts.OPT_MAX_INCLUSIVE, "false");
opts.put(CfCqSliceOpts.OPT_MIN_INCLUSIVE, "false");
loadKvs(foundKvs, opts, INFINITY);
for (int i = 0; i < LR_DIM; i++) {
for (int j = 0; j < LR_DIM; j++) {
for (int k = 0; k < LR_DIM; k++) {
if (j == 21 && k == 21) {
assertTrue(foundKvs[i][j][k],
"(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
} else {
assertFalse(foundKvs[i][j][k],
"(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
}
}
}
}
}
  @Test
  public void testAllCfsCqSlice() throws Exception {
    // Only a cq slice is configured; every column family passes through.
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCq = 10;
    long sliceMaxCq = 30;
    Map<String,String> opts = new HashMap<>();
    opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    loadKvs(foundKvs, opts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (k >= sliceMinCq && k <= sliceMaxCq) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
  @Test
  public void testSliceCfsAllCqs() throws Exception {
    // Only a cf slice is configured; every column qualifier passes through.
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCf = 10;
    long sliceMaxCf = 30;
    Map<String,String> opts = new HashMap<>();
    opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    loadKvs(foundKvs, opts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (j >= sliceMinCf && j <= sliceMaxCf) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
  @Test
  public void testEmptySlice() throws Exception {
    // Slice bounds lie entirely outside the data (> LR_DIM) and are exclusive,
    // so the scan must return nothing at all.
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCf = LR_DIM + 1;
    long sliceMinCq = LR_DIM + 1;
    long sliceMaxCf = LR_DIM + 1;
    long sliceMaxCq = LR_DIM + 1;
    Map<String,String> opts = new HashMap<>();
    opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_INCLUSIVE, "false");
    opts.put(CfCqSliceOpts.OPT_MIN_INCLUSIVE, "false");
    loadKvs(foundKvs, opts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          assertFalse(foundKvs[i][j][k],
              "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
        }
      }
    }
  }
  @Test
  public void testStackedFilters() throws Exception {
    // Two filter instances stacked: the first slices on cf only, the second
    // (layered on a deep copy of the first) slices on cq only. The combined
    // result must equal a single filter configured with both slices.
    Map<String,String> firstOpts = new HashMap<>();
    Map<String,String> secondOpts = new HashMap<>();
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCf = 20;
    long sliceMaxCf = 25;
    long sliceMinCq = 30;
    long sliceMaxCq = 35;
    assertTrue(sliceMinCf < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMinCq < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMaxCf < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMaxCq < LR_DIM, "slice param must be less than LR_DIM");
    firstOpts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    firstOpts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    secondOpts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    secondOpts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    SortedKeyValueIterator<Key,Value> skvi =
        getFilterClass().getDeclaredConstructor().newInstance();
    skvi.init(new SortedMapIterator(data), firstOpts, null);
    // deepCopy also exercises the filter's copy semantics when used as a parent.
    loadKvs(skvi.deepCopy(null), foundKvs, secondOpts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (j >= sliceMinCf && j <= sliceMaxCf && k >= sliceMinCq && k <= sliceMaxCq) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
  @Test
  public void testSeekMinExclusive() throws Exception {
    // Exclusive minimum bounds: phase 1 seeks to a range that starts exactly at
    // the (excluded) minimum key; phase 2 repeats with an unbounded range and a
    // cq slice starting at 0. In both cases keys equal to the minimum must be
    // skipped (strict > on cf and cq below).
    boolean[][][] foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    long sliceMinCf = 20;
    long sliceMinCq = 30;
    long sliceMaxCf = 25;
    long sliceMaxCq = 35;
    assertTrue(sliceMinCf < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMinCq < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMaxCf < LR_DIM, "slice param must be less than LR_DIM");
    assertTrue(sliceMaxCq < LR_DIM, "slice param must be less than LR_DIM");
    Map<String,String> opts = new HashMap<>();
    opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MIN_INCLUSIVE, "false");
    opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    Range startsAtMinCf = new Range(new Key(LONG_LEX.encode(0L), LONG_LEX.encode(sliceMinCf),
        LONG_LEX.encode(sliceMinCq), new byte[] {}, Long.MAX_VALUE), null);
    loadKvs(foundKvs, opts, startsAtMinCf);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (j > sliceMinCf && j <= sliceMaxCf && k > sliceMinCq && k <= sliceMaxCq) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
    // Phase 2: fresh result cube, cq slice moved to (0, 10] with the same
    // exclusive minimum, scanned over the full table.
    foundKvs = new boolean[LR_DIM][LR_DIM][LR_DIM];
    sliceMinCq = 0;
    sliceMaxCq = 10;
    opts.put(CfCqSliceOpts.OPT_MIN_CF, new String(LONG_LEX.encode(sliceMinCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MIN_INCLUSIVE, "false");
    opts.put(CfCqSliceOpts.OPT_MIN_CQ, new String(LONG_LEX.encode(sliceMinCq), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CF, new String(LONG_LEX.encode(sliceMaxCf), UTF_8));
    opts.put(CfCqSliceOpts.OPT_MAX_CQ, new String(LONG_LEX.encode(sliceMaxCq), UTF_8));
    loadKvs(foundKvs, opts, INFINITY);
    for (int i = 0; i < LR_DIM; i++) {
      for (int j = 0; j < LR_DIM; j++) {
        for (int k = 0; k < LR_DIM; k++) {
          if (j > sliceMinCf && j <= sliceMaxCf && k > sliceMinCq && k <= sliceMaxCq) {
            assertTrue(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must be found in scan");
          } else {
            assertFalse(foundKvs[i][j][k],
                "(r, cf, cq) == (" + i + ", " + j + ", " + k + ") must not be found in scan");
          }
        }
      }
    }
  }
private void loadKvs(boolean[][][] foundKvs, Map<String,String> options, Range range)
throws Exception {
loadKvs(new SortedMapIterator(data), foundKvs, options, range);
}
  // Instantiates the filter under test over the given parent, scans the range,
  // and records each (row, cf, cq) seen in foundKvs, failing on duplicates.
  private void loadKvs(SortedKeyValueIterator<Key,Value> parent, boolean[][][] foundKvs,
      Map<String,String> options, Range range) throws Exception {
    SortedKeyValueIterator<Key,Value> skvi =
        getFilterClass().getDeclaredConstructor().newInstance();
    skvi.init(parent, options, null);
    skvi.seek(range, EMPTY_CF_SET, false);
    while (skvi.hasTop()) {
      Key k = skvi.getTopKey();
      int row = LONG_LEX.decode(k.getRow().copyBytes()).intValue();
      int cf = LONG_LEX.decode(k.getColumnFamily().copyBytes()).intValue();
      int cq = LONG_LEX.decode(k.getColumnQualifier().copyBytes()).intValue();
      assertFalse(foundKvs[row][cf][cq], "Duplicate " + row + " " + cf + " " + cq);
      foundKvs[row][cf][cq] = true;
      // ~1% of the time, re-seek just past the current key instead of calling
      // next(), to exercise the filter's mid-scan seek handling.
      if (RANDOM.get().nextInt(100) == 0) {
        skvi.seek(new Range(k, false, range.getEndKey(), range.isEndKeyInclusive()), EMPTY_CF_SET,
            false);
      } else {
        skvi.next();
      }
    }
  }
/**
* Rows 0..(LR_DIM - 1) will each have LR_DIM CFs, each with LR_DIM CQs
*
* For instance if LR_DIM is 3, (cf,cq) r: val
*
* (0,0) (0,1) (0,2) (1,0) (1,1) (1,2) (2,0) (2,1) (2,2) 0 0 1 2 3 4 5 6 7 8 1 9 10 11 12 13 14 15
* 16 17 2 18 19 20 21 22 23 24 25 26
*/
static TreeMap<Key,Value> createMap(int numRows, int numCfs, int numCqs) {
TreeMap<Key,Value> data = new TreeMap<>();
for (int i = 0; i < numRows; i++) {
byte[] rowId = LONG_LEX.encode(ROW_ID_GEN.getAndIncrement());
for (int j = 0; j < numCfs; j++) {
for (int k = 0; k < numCqs; k++) {
byte[] cf = LONG_LEX.encode((long) j);
byte[] cq = LONG_LEX.encode((long) k);
byte[] val = LONG_LEX.encode((long) (i * numCfs + j * numCqs + k));
data.put(new Key(rowId, cf, cq, new byte[0], 9), new Value(val));
}
}
}
return data;
}
  /**
   * A {@link Lexicoder} that encodes longs as fixed-width, zero-padded decimal strings, so the
   * byte-wise sort order matches numeric order while keeping the data human-readable.
   */
  static class ReadableLongLexicoder implements Lexicoder<Long> {
    // Format string, e.g. "%04d" for 4-digit zero-padded output.
    final String fmtStr;
    public ReadableLongLexicoder() {
      this(20);
    }
    public ReadableLongLexicoder(int numDigits) {
      fmtStr = "%0" + numDigits + "d";
    }
    @Override
    public byte[] encode(Long l) {
      return String.format(fmtStr, l).getBytes(UTF_8);
    }
    @Override
    public Long decode(byte[] b) throws ValueFormatException {
      return Long.parseLong(new String(b, UTF_8));
    }
  }
}
| 9,505 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/RowEncodingIteratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class RowEncodingIteratorTest {
  // Minimal IteratorEnvironment stub: reports scan scope and no full major compaction.
  private static final class DummyIteratorEnv implements IteratorEnvironment {
    @Override
    public IteratorUtil.IteratorScope getIteratorScope() {
      return IteratorUtil.IteratorScope.scan;
    }
    @Override
    public boolean isFullMajorCompaction() {
      return false;
    }
  }
  // Concrete RowEncodingIterator that serializes a row as:
  // key count, keys, value count, values (Writable serialization).
  private static final class RowEncodingIteratorImpl extends RowEncodingIterator {
    // Inverse of rowEncoder: rebuilds the sorted key/value map from an encoded row value.
    public static SortedMap<Key,Value> decodeRow(Value rowValue) throws IOException {
      DataInputStream dis = new DataInputStream(new ByteArrayInputStream(rowValue.get()));
      int numKeys = dis.readInt();
      List<Key> decodedKeys = new ArrayList<>();
      List<Value> decodedValues = new ArrayList<>();
      SortedMap<Key,Value> out = new TreeMap<>();
      for (int i = 0; i < numKeys; i++) {
        Key k = new Key();
        k.readFields(dis);
        decodedKeys.add(k);
      }
      int numValues = dis.readInt();
      for (int i = 0; i < numValues; i++) {
        Value v = new Value();
        v.readFields(dis);
        decodedValues.add(v);
      }
      // A well-formed encoding has exactly one value per key.
      if (decodedKeys.size() != decodedValues.size()) {
        throw new IOException("Number of keys doesn't match number of values");
      }
      for (int i = 0; i < decodedKeys.size(); i++) {
        out.put(decodedKeys.get(i), decodedValues.get(i));
      }
      return out;
    }
    @Override
    public SortedMap<Key,Value> rowDecoder(Key rowKey, Value rowValue) throws IOException {
      return decodeRow(rowValue);
    }
    @Override
    public Value rowEncoder(List<Key> keys, List<Value> values) throws IOException {
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(baos);
      dos.writeInt(keys.size());
      for (Key key : keys) {
        key.write(dos);
      }
      dos.writeInt(values.size());
      for (Value v : values) {
        v.write(dos);
      }
      dos.flush();
      return new Value(baos.toByteArray());
    }
  }
  // Shorthand for putting a (row, cf, cq, cv, ts) -> val entry into a sorted map.
  private void pkv(SortedMap<Key,Value> map, String row, String cf, String cq, String cv, long ts,
      byte[] val) {
    map.put(new Key(new Text(row), new Text(cf), new Text(cq), new Text(cv), ts),
        new Value(val, true));
  }
  @Test
  public void testEncodeAll() throws IOException {
    // Three rows of two 1 KB entries each; the 3K buffer below fits a whole row.
    byte[] kbVal = new byte[1024];
    // This code is shamelessly borrowed from the WholeRowIteratorTest.
    SortedMap<Key,Value> map1 = new TreeMap<>();
    pkv(map1, "row1", "cf1", "cq1", "cv1", 5, kbVal);
    pkv(map1, "row1", "cf1", "cq2", "cv1", 6, kbVal);
    SortedMap<Key,Value> map2 = new TreeMap<>();
    pkv(map2, "row2", "cf1", "cq1", "cv1", 5, kbVal);
    pkv(map2, "row2", "cf1", "cq2", "cv1", 6, kbVal);
    SortedMap<Key,Value> map3 = new TreeMap<>();
    pkv(map3, "row3", "cf1", "cq1", "cv1", 5, kbVal);
    pkv(map3, "row3", "cf1", "cq2", "cv1", 6, kbVal);
    SortedMap<Key,Value> map = new TreeMap<>();
    map.putAll(map1);
    map.putAll(map2);
    map.putAll(map3);
    SortedMapIterator src = new SortedMapIterator(map);
    Range range = new Range(new Text("row1"), true, new Text("row2"), true);
    RowEncodingIteratorImpl iter = new RowEncodingIteratorImpl();
    Map<String,String> bigBufferOpts = new HashMap<>();
    bigBufferOpts.put(RowEncodingIterator.MAX_BUFFER_SIZE_OPT, "3K");
    iter.init(src, bigBufferOpts, new DummyIteratorEnv());
    iter.seek(range, new ArrayList<>(), false);
    assertTrue(iter.hasTop());
    assertEquals(map1, RowEncodingIteratorImpl.decodeRow(iter.getTopValue()));
    // simulate something continuing using the last key from the iterator
    // this is what client and server code will do
    range = new Range(iter.getTopKey(), false, range.getEndKey(), range.isEndKeyInclusive());
    iter.seek(range, new ArrayList<>(), false);
    assertTrue(iter.hasTop());
    assertEquals(map2, RowEncodingIteratorImpl.decodeRow(iter.getTopValue()));
    iter.next();
    assertFalse(iter.hasTop());
  }
  @Test
  public void testEncodeSome() throws IOException {
    // One row of two 1 KB entries but only a 1K buffer: the row cannot fit.
    byte[] kbVal = new byte[1024];
    // This code is shamelessly borrowed from the WholeRowIteratorTest.
    SortedMap<Key,Value> map1 = new TreeMap<>();
    pkv(map1, "row1", "cf1", "cq1", "cv1", 5, kbVal);
    pkv(map1, "row1", "cf1", "cq2", "cv1", 6, kbVal);
    SortedMap<Key,Value> map = new TreeMap<>();
    map.putAll(map1);
    SortedMapIterator src = new SortedMapIterator(map);
    Range range = new Range(new Text("row1"), true, new Text("row2"), true);
    RowEncodingIteratorImpl iter = new RowEncodingIteratorImpl();
    Map<String,String> bigBufferOpts = new HashMap<>();
    bigBufferOpts.put(RowEncodingIterator.MAX_BUFFER_SIZE_OPT, "1K");
    iter.init(src, bigBufferOpts, new DummyIteratorEnv());
    assertThrows(IllegalArgumentException.class, () -> iter.seek(range, new ArrayList<>(), false));
    // IllegalArgumentException should be thrown as we can't fit the whole row into its buffer
  }
}
| 9,506 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/TransformingIteratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.WrappingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.accumulo.core.iteratorsImpl.system.VisibilityFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class TransformingIteratorTest {
  // Scan-time auths include vis0..vis4; generated data only uses vis1..vis3,
  // so a transform may introduce vis0/vis4 and still be visible.
  private static Authorizations authorizations =
      new Authorizations("vis0", "vis1", "vis2", "vis3", "vis4");
  private static final Map<String,String> EMPTY_OPTS = Map.of();
  private TransformingIterator titer;
  private TreeMap<Key,Value> data = new TreeMap<>();
  @BeforeEach
  public void createData() {
    // Fresh three-row data set before each test.
    data.clear();
    generateRow(data, "row1");
    generateRow(data, "row2");
    generateRow(data, "row3");
  }
  private void setUpTransformIterator(Class<? extends TransformingIterator> clazz)
      throws IOException {
    setUpTransformIterator(clazz, true);
  }
  // Builds the iterator stack under test:
  // SortedMapIterator -> cf skipping -> visibility filter -> ReuseIterator -> transform.
  private void setUpTransformIterator(Class<? extends TransformingIterator> clazz,
      boolean setupAuths) throws IOException {
    SortedMapIterator source = new SortedMapIterator(data);
    ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(source);
    SortedKeyValueIterator<Key,Value> visFilter =
        VisibilityFilter.wrap(cfsi, authorizations, new byte[0]);
    ReuseIterator reuserIter = new ReuseIterator();
    reuserIter.init(visFilter, EMPTY_OPTS, null);
    try {
      titer = clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new IllegalStateException(e);
    }
    // Mocked environment always reports scan scope.
    IteratorEnvironment iterEnv = EasyMock.createMock(IteratorEnvironment.class);
    EasyMock.expect(iterEnv.getIteratorScope()).andReturn(IteratorScope.scan).anyTimes();
    EasyMock.replay(iterEnv);
    Map<String,String> opts;
    if (setupAuths) {
      // Configure the transform with auths vis0..vis3 (deliberately excludes vis4).
      IteratorSetting cfg = new IteratorSetting(21, clazz);
      TransformingIterator.setAuthorizations(cfg,
          new Authorizations("vis0", "vis1", "vis2", "vis3"));
      opts = cfg.getOptions();
    } else {
      opts = Map.of();
    }
    titer.init(reuserIter, opts, iterEnv);
  }
  @Test
  public void testIdentityScan() throws Exception {
    setUpTransformIterator(IdentityKeyTransformingIterator.class);
    // This is just an identity scan, but with the "reuse" iterator that reuses
    // the same key/value pair for every getTopKey/getTopValue call. The code
    // will always return the final key/value if we didn't copy the original key
    // in the iterator.
    TreeMap<Key,Value> expected = new TreeMap<>();
    for (int row = 1; row <= 3; ++row) {
      for (int cf = 1; cf <= 3; ++cf) {
        for (int cq = 1; cq <= 3; ++cq) {
          for (int cv = 1; cv <= 3; ++cv) {
            putExpected(expected, row, cf, cq, cv, null);
          }
        }
      }
    }
    checkExpected(expected);
  }
  @Test
  public void testNoRangeScan() throws Exception {
    // Exercise each key-part-reversing transform over a full (unbounded) scan.
    List<Class<? extends ReversingKeyTransformingIterator>> classes = new ArrayList<>();
    classes.add(ColFamReversingKeyTransformingIterator.class);
    classes.add(ColQualReversingKeyTransformingIterator.class);
    classes.add(ColVisReversingKeyTransformingIterator.class);
    // Test transforming col fam, col qual, col vis
    for (Class<? extends ReversingKeyTransformingIterator> clazz : classes) {
      setUpTransformIterator(clazz);
      // All rows with visibilities reversed
      TransformingIterator iter = clazz.getDeclaredConstructor().newInstance();
      TreeMap<Key,Value> expected = new TreeMap<>();
      for (int row = 1; row <= 3; ++row) {
        for (int cf = 1; cf <= 3; ++cf) {
          for (int cq = 1; cq <= 3; ++cq) {
            for (int cv = 1; cv <= 3; ++cv) {
              putExpected(expected, row, cf, cq, cv, iter.getKeyPrefix());
            }
          }
        }
      }
      checkExpected(expected);
    }
  }
  @Test
  public void testVisbilityFiltering() throws Exception {
    // Should return nothing since we produced visibilities that can't be seen
    setUpTransformIterator(BadVisKeyTransformingIterator.class);
    checkExpected(new TreeMap<>());
    // Do a "reverse" on the visibility (vis1 -> vis2, vis2 -> vis3, vis3 -> vis0)
    // Source data has vis1, vis2, vis3 so vis0 is a new one that is introduced.
    // Make sure it shows up in the output with the default test auths which include
    // vis0.
    setUpTransformIterator(ColVisReversingKeyTransformingIterator.class);
    TreeMap<Key,Value> expected = new TreeMap<>();
    for (int row = 1; row <= 3; ++row) {
      for (int cf = 1; cf <= 3; ++cf) {
        for (int cq = 1; cq <= 3; ++cq) {
          for (int cv = 1; cv <= 3; ++cv) {
            putExpected(expected, row, cf, cq, cv, PartialKey.ROW_COLFAM_COLQUAL);
          }
        }
      }
    }
    checkExpected(expected);
  }
  @Test
  public void testCreatingIllegalVisbility() throws Exception {
    // illegal visibility created by transform should be filtered on scan, even if evaluation is
    // done
    setUpTransformIterator(IllegalVisKeyTransformingIterator.class, false);
    checkExpected(new TreeMap<>());
    // ensure illegal vis is suppressed when evaluations is done
    setUpTransformIterator(IllegalVisKeyTransformingIterator.class);
    checkExpected(new TreeMap<>());
  }
  @Test
  public void testRangeStart() throws Exception {
    // A key whose untransformed position is before the range start can still be
    // returned if its transformed position falls inside the range.
    setUpTransformIterator(ColVisReversingKeyTransformingIterator.class);
    TreeMap<Key,Value> expected = new TreeMap<>();
    putExpected(expected, 1, 2, 2, 1, PartialKey.ROW_COLFAM_COLQUAL); // before the range start, but
    // transforms in the range
    putExpected(expected, 1, 2, 2, 2, PartialKey.ROW_COLFAM_COLQUAL);
    checkExpected(new Range(new Key("row1", "cf2", "cq2", "vis1"), true,
        new Key("row1", "cf2", "cq3"), false), expected);
  }
  @Test
  public void testRangeEnd() throws Exception {
    // Conversely, a key whose transformed position lands past the range end
    // must be dropped even though its source position is inside the range.
    setUpTransformIterator(ColVisReversingKeyTransformingIterator.class);
    TreeMap<Key,Value> expected = new TreeMap<>();
    // putExpected(expected, 1, 2, 2, 1, part); // transforms vis outside range end
    putExpected(expected, 1, 2, 2, 2, PartialKey.ROW_COLFAM_COLQUAL);
    putExpected(expected, 1, 2, 2, 3, PartialKey.ROW_COLFAM_COLQUAL);
    checkExpected(new Range(new Key("row1", "cf2", "cq2"), true,
        new Key("row1", "cf2", "cq2", "vis2"), false), expected);
  }
  @Test
  public void testPrefixRange() throws Exception {
    setUpTransformIterator(ColFamReversingKeyTransformingIterator.class);
    // Set a range that is before all of the untransformed data. However,
    // the data with untransformed col fam cf3 will transform to cf0 and
    // be inside the range.
    TreeMap<Key,Value> expected = new TreeMap<>();
    for (int cq = 1; cq <= 3; ++cq) {
      for (int cv = 1; cv <= 3; ++cv) {
        putExpected(expected, 1, 3, cq, cv, PartialKey.ROW);
      }
    }
    checkExpected(new Range(new Key("row1", "cf0"), true, new Key("row1", "cf1"), false), expected);
  }
  @Test
  public void testPostfixRange() throws Exception {
    // Set a range that's after all data and make sure we don't
    // somehow return something.
    setUpTransformIterator(ColFamReversingKeyTransformingIterator.class);
    checkExpected(new Range(new Key("row4"), null), new TreeMap<>());
  }
  @Test
  public void testReplaceKeyParts() {
    // Each replace* helper must preserve untouched key parts, the timestamp,
    // and the delete flag of the original key.
    TransformingIterator it = new IdentityKeyTransformingIterator();
    Key originalKey = new Key("r", "cf", "cq", "cv", 42);
    originalKey.setDeleted(true);
    Key newKey = it.replaceColumnFamily(originalKey, new Text("test"));
    assertEquals(createDeleteKey("r", "test", "cq", "cv", 42), newKey);
    newKey = it.replaceColumnQualifier(originalKey, new Text("test"));
    assertEquals(createDeleteKey("r", "cf", "test", "cv", 42), newKey);
    newKey = it.replaceColumnVisibility(originalKey, new Text("test"));
    assertEquals(createDeleteKey("r", "cf", "cq", "test", 42), newKey);
    newKey = it.replaceKeyParts(originalKey, new Text("testCQ"), new Text("testCV"));
    assertEquals(createDeleteKey("r", "cf", "testCQ", "testCV", 42), newKey);
    newKey =
        it.replaceKeyParts(originalKey, new Text("testCF"), new Text("testCQ"), new Text("testCV"));
    assertEquals(createDeleteKey("r", "testCF", "testCQ", "testCV", 42), newKey);
  }
  @Test
  public void testFetchColumnFamilites() throws Exception {
    // In this test, we are fetching column family cf2, which is in
    // the transformed space. The source column family that will
    // transform into cf2 is cf1, so that is the column family we
    // put in the expectations.
    int expectedCF = 1;
    setUpTransformIterator(ColFamReversingKeyTransformingIterator.class);
    TreeMap<Key,Value> expected = new TreeMap<>();
    for (int row = 1; row <= 3; ++row) {
      for (int cq = 1; cq <= 3; ++cq) {
        for (int cv = 1; cv <= 3; ++cv) {
          putExpected(expected, row, expectedCF, cq, cv, PartialKey.ROW);
        }
      }
    }
    checkExpected(expected, "cf2");
  }
  @Test
  public void testDeepCopy() throws Exception {
    // Stack an IntersectingIterator on top of the transform and make sure
    // deepCopy of the transform behaves: only doc02 has all three terms
    // (foo, dog, cat); doc03 lacks "foo".
    ColumnVisibility vis1 = new ColumnVisibility("vis1");
    ColumnVisibility vis3 = new ColumnVisibility("vis3");
    data.clear();
    Value ev = new Value("");
    data.put(new Key("shard001", "foo", "doc02", vis1, 78), ev);
    data.put(new Key("shard001", "dog", "doc02", vis3, 78), ev);
    data.put(new Key("shard001", "cat", "doc02", vis3, 78), ev);
    data.put(new Key("shard001", "bar", "doc03", vis1, 78), ev);
    data.put(new Key("shard001", "dog", "doc03", vis3, 78), ev);
    data.put(new Key("shard001", "cat", "doc03", vis3, 78), ev);
    setUpTransformIterator(ColVisReversingKeyTransformingIterator.class);
    IntersectingIterator iiIter = new IntersectingIterator();
    IteratorSetting iicfg = new IteratorSetting(22, IntersectingIterator.class);
    IntersectingIterator.setColumnFamilies(iicfg,
        new Text[] {new Text("foo"), new Text("dog"), new Text("cat")});
    iiIter.init(titer, iicfg.getOptions(), null);
    iiIter.seek(new Range(), new HashSet<>(), false);
    assertTrue(iiIter.hasTop());
    Key docKey = iiIter.getTopKey();
    assertEquals("shard001", docKey.getRowData().toString());
    assertEquals("doc02", docKey.getColumnQualifierData().toString());
    iiIter.next();
    assertFalse(iiIter.hasTop());
  }
  @Test
  public void testCompactionScanFetchingColumnFamilies() throws Exception {
    // In this test, we are fetching column family cf2, which is in
    // the transformed space. The source column family that will
    // transform into cf2 is cf1, so that is the column family we
    // put in the expectations.
    // Same scenario as testFetchColumnFamilites, but the iterator's init()
    // substitutes a majc-scope environment (see the *Compaction* subclass).
    int expectedCF = 1;
    setUpTransformIterator(ColFamReversingCompactionKeyTransformingIterator.class);
    TreeMap<Key,Value> expected = new TreeMap<>();
    for (int row = 1; row <= 3; ++row) {
      for (int cq = 1; cq <= 3; ++cq) {
        for (int cv = 1; cv <= 3; ++cv) {
          putExpected(expected, row, expectedCF, cq, cv, PartialKey.ROW);
        }
      }
    }
    checkExpected(expected, "cf2");
  }
  @Test
  public void testCompactionDoesntFilterVisibilities() throws Exception {
    // In scan mode, this should return nothing since it produces visibilities
    // the user can't see. In compaction mode, however, the visibilities
    // should still show up.
    setUpTransformIterator(BadVisCompactionKeyTransformingIterator.class);
    TreeMap<Key,Value> expected = new TreeMap<>();
    for (int rowID = 1; rowID <= 3; ++rowID) {
      for (int cfID = 1; cfID <= 3; ++cfID) {
        for (int cqID = 1; cqID <= 3; ++cqID) {
          for (int cvID = 1; cvID <= 3; ++cvID) {
            String row = "row" + rowID;
            String cf = "cf" + cfID;
            String cq = "cq" + cqID;
            // Every visibility is rewritten to "badvis" by the transform.
            String cv = "badvis";
            // Timestamp encodes (cf, cq, cv) ids, matching generateRow().
            long ts = 100 * cfID + 10 * cqID + cvID;
            String val = "val" + ts;
            expected.put(new Key(row, cf, cq, cv, ts), new Value(val));
          }
        }
      }
    }
    checkExpected(expected);
  }
  @Test
  public void testCompactionAndIllegalVisibility() throws Exception {
    // In (simulated) major-compaction scope, producing a syntactically invalid
    // visibility must make the iterator fail rather than persist bad data.
    setUpTransformIterator(IllegalVisCompactionKeyTransformingIterator.class);
    assertThrows(Exception.class, () -> checkExpected(new TreeMap<>()));
  }
  @Test
  public void testDupes() throws Exception {
    // DupeTransformingIterator collapses every source key onto the same
    // cf/cq/vis/timestamp; all entries must still be returned, just rewritten.
    setUpTransformIterator(DupeTransformingIterator.class);
    titer.seek(new Range(), new HashSet<>(), false);
    int count = 0;
    while (titer.hasTop()) {
      Key key = titer.getTopKey();
      titer.next();
      assertEquals("cf1", key.getColumnFamily().toString());
      assertEquals("cq1", key.getColumnQualifier().toString());
      assertEquals("", key.getColumnVisibility().toString());
      assertEquals(5L, key.getTimestamp());
      count++;
    }
    // 81 entries expected — presumably 3 generated rows x 27 entries each;
    // confirm against the test fixture's setup (outside this view).
    assertEquals(81, count);
  }
@Test
public void testValidateOptions() {
TransformingIterator ti = new ColFamReversingKeyTransformingIterator();
IteratorSetting is =
new IteratorSetting(100, "cfrkt", ColFamReversingKeyTransformingIterator.class);
TransformingIterator.setAuthorizations(is, new Authorizations("A", "B"));
TransformingIterator.setMaxBufferSize(is, 10000000);
assertTrue(ti.validateOptions(is.getOptions()));
Map<String,String> opts = new HashMap<>();
opts.put(TransformingIterator.MAX_BUFFER_SIZE_OPT, "10M");
assertTrue(ti.validateOptions(is.getOptions()));
opts.clear();
opts.put(TransformingIterator.MAX_BUFFER_SIZE_OPT, "A,B");
assertThrows(IllegalArgumentException.class, () -> ti.validateOptions(opts));
opts.clear();
opts.put(TransformingIterator.AUTH_OPT, Authorizations.HEADER + "~~~~");
assertThrows(IllegalArgumentException.class, () -> ti.validateOptions(opts));
}
private Key createDeleteKey(String row, String colFam, String colQual, String colVis,
long timestamp) {
Key key = new Key(row, colFam, colQual, colVis, timestamp);
key.setDeleted(true);
return key;
}
  /** Scans {@code range} over all column families and compares against {@code expectedEntries}. */
  private void checkExpected(Range range, TreeMap<Key,Value> expectedEntries) throws IOException {
    checkExpected(range, new HashSet<>(), expectedEntries);
  }
private void checkExpected(TreeMap<Key,Value> expectedEntries, String... fa) throws IOException {
HashSet<ByteSequence> families = new HashSet<>();
for (String family : fa) {
families.add(new ArrayByteSequence(family));
}
checkExpected(new Range(), families, expectedEntries);
}
  /**
   * Seeks the transforming iterator and verifies it returns exactly the expected entries, in
   * sorted order. Note: this consumes (polls) entries from {@code expectedEntries} as it goes.
   */
  private void checkExpected(Range range, Set<ByteSequence> families,
      TreeMap<Key,Value> expectedEntries) throws IOException {
    // Family filtering is inclusive only when specific families were requested.
    titer.seek(range, families, !families.isEmpty());
    while (titer.hasTop()) {
      Entry<Key,Value> expected = expectedEntries.pollFirstEntry();
      Key actualKey = titer.getTopKey();
      Value actualValue = titer.getTopValue();
      titer.next();
      assertNotNull(expected, "Ran out of expected entries on: " + actualKey);
      assertEquals(expected.getKey(), actualKey, "Key mismatch");
      assertEquals(expected.getValue(), actualValue, "Value mismatch");
    }
    assertTrue(expectedEntries.isEmpty(),
        "Scanner did not return all expected entries: " + expectedEntries);
  }
  /**
   * Adds the entry for (rowID, cfID, cqID, cvID) to {@code expected}, applying the test's
   * digit-flipping transform() to the key component immediately following the given prefix —
   * i.e. the component the transforming iterator under test is expected to rewrite.
   */
  private static void putExpected(SortedMap<Key,Value> expected, int rowID, int cfID, int cqID,
      int cvID, PartialKey part) {
    String row = "row" + rowID;
    String cf = "cf" + cfID;
    String cq = "cq" + cqID;
    String cv = "vis" + cvID;
    // Timestamp encodes (cf, cq, cv) ids, matching generateRow().
    long ts = 100 * cfID + 10 * cqID + cvID;
    String val = "val" + ts;
    if (part != null) {
      switch (part) {
        case ROW:
          // Prefix ROW: the column family is the transformed component.
          cf = transform(new Text(cf)).toString();
          break;
        case ROW_COLFAM:
          cq = transform(new Text(cq)).toString();
          break;
        case ROW_COLFAM_COLQUAL:
          cv = transform(new Text(cv)).toString();
          break;
        default:
          break;
      }
    }
    expected.put(new Key(row, cf, cq, cv, ts), new Value(val));
  }
private static Text transform(Text val) {
String s = val.toString();
// Reverse the order of the number at the end, and subtract one
int i = 3 - Integer.parseInt(s.substring(s.length() - 1));
StringBuilder sb = new StringBuilder();
sb.append(s.substring(0, s.length() - 1));
sb.append(i);
return new Text(sb.toString());
}
  /**
   * Fills {@code data} with 27 entries (3 CFs x 3 CQs x 3 visibilities) for {@code row}.
   * The timestamp and value encode the (cf, cq, cv) ids so every entry is distinguishable.
   */
  private static void generateRow(TreeMap<Key,Value> data, String row) {
    for (int cfID = 1; cfID <= 3; ++cfID) {
      for (int cqID = 1; cqID <= 3; ++cqID) {
        for (int cvID = 1; cvID <= 3; ++cvID) {
          String cf = "cf" + cfID;
          String cq = "cq" + cqID;
          String cv = "vis" + cvID;
          long ts = 100 * cfID + 10 * cqID + cvID;
          String val = "val" + ts;
          Key k = new Key(row, cf, cq, cv, ts);
          Value v = new Value(val);
          data.put(k, v);
        }
      }
    }
  }
  /**
   * Returns a copy of {@code originalKey} with the component just past {@code part} passed
   * through transform(): CF for ROW, CQ for ROW_COLFAM, CV for ROW_COLFAM_COLQUAL.
   */
  private static Key reverseKeyPart(Key originalKey, PartialKey part) {
    Text row = originalKey.getRow();
    Text cf = originalKey.getColumnFamily();
    Text cq = originalKey.getColumnQualifier();
    Text cv = originalKey.getColumnVisibility();
    long ts = originalKey.getTimestamp();
    switch (part) {
      case ROW:
        cf = transform(cf);
        break;
      case ROW_COLFAM:
        cq = transform(cq);
        break;
      case ROW_COLFAM_COLQUAL:
        cv = transform(cv);
        break;
      default:
        break;
    }
    return new Key(row, cf, cq, cv, ts);
  }
public static class IdentityKeyTransformingIterator extends TransformingIterator {
@Override
protected PartialKey getKeyPrefix() {
return PartialKey.ROW;
}
@Override
protected void transformRange(SortedKeyValueIterator<Key,Value> input, KVBuffer output)
throws IOException {
while (input.hasTop()) {
output.append(input.getTopKey(), input.getTopValue());
input.next();
}
}
}
  /**
   * Collapses every source key onto the same cf/cq/vis/timestamp, producing many duplicate
   * output keys; used by testDupes to check that all entries still come back.
   */
  public static class DupeTransformingIterator extends TransformingIterator {
    @Override
    protected void transformRange(SortedKeyValueIterator<Key,Value> input, KVBuffer output)
        throws IOException {
      while (input.hasTop()) {
        Key originalKey = input.getTopKey();
        Key ret = replaceKeyParts(originalKey, new Text("cf1"), new Text("cq1"), new Text(""));
        ret.setTimestamp(5);
        output.append(ret, input.getTopValue());
        input.next();
      }
    }
    @Override
    protected PartialKey getKeyPrefix() {
      return PartialKey.ROW;
    }
  }
public abstract static class ReversingKeyTransformingIterator extends TransformingIterator {
@Override
protected void transformRange(SortedKeyValueIterator<Key,Value> input, KVBuffer output)
throws IOException {
while (input.hasTop()) {
Key originalKey = input.getTopKey();
output.append(reverseKeyPart(originalKey, getKeyPrefix()), input.getTopValue());
input.next();
}
}
}
  /** Reverses the column family; also maps fetched families back to source space. */
  public static class ColFamReversingKeyTransformingIterator
      extends ReversingKeyTransformingIterator {
    @Override
    protected PartialKey getKeyPrefix() {
      return PartialKey.ROW;
    }
    // Translates requested (transformed-space) families into the source
    // families that produce them, so seeks fetch the right underlying data.
    @Override
    protected Collection<ByteSequence>
        untransformColumnFamilies(Collection<ByteSequence> columnFamilies) {
      HashSet<ByteSequence> untransformed = new HashSet<>();
      for (ByteSequence cf : columnFamilies) {
        untransformed.add(untransformColumnFamily(cf));
      }
      return untransformed;
    }
    protected ByteSequence untransformColumnFamily(ByteSequence colFam) {
      // transform() is an involution, so applying it again undoes the mapping.
      Text transformed = transform(new Text(colFam.toArray()));
      byte[] bytes = transformed.getBytes();
      return new ArrayByteSequence(bytes, 0, transformed.getLength());
    }
  }
  /** Same transform, but init() deliberately discards the caller's environment and
   * substitutes a majc-scope one to simulate running during a major compaction. */
  public static class ColFamReversingCompactionKeyTransformingIterator
      extends ColFamReversingKeyTransformingIterator {
    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) throws IOException {
      env = new MajCIteratorEnvironmentAdapter();
      super.init(source, options, env);
    }
  }
  /** Reverses the column qualifier (prefix ROW_COLFAM). */
  public static class ColQualReversingKeyTransformingIterator
      extends ReversingKeyTransformingIterator {
    @Override
    protected PartialKey getKeyPrefix() {
      return PartialKey.ROW_COLFAM;
    }
  }
  /** Reverses the column visibility (prefix ROW_COLFAM_COLQUAL). */
  public static class ColVisReversingKeyTransformingIterator
      extends ReversingKeyTransformingIterator {
    @Override
    protected PartialKey getKeyPrefix() {
      return PartialKey.ROW_COLFAM_COLQUAL;
    }
  }
  /** Rewrites every visibility to "A&amp;|||", a syntactically invalid expression. */
  public static class IllegalVisKeyTransformingIterator extends TransformingIterator {
    @Override
    protected PartialKey getKeyPrefix() {
      return PartialKey.ROW_COLFAM_COLQUAL;
    }
    @Override
    protected void transformRange(SortedKeyValueIterator<Key,Value> input, KVBuffer output)
        throws IOException {
      while (input.hasTop()) {
        Key originalKey = input.getTopKey();
        output.append(
            new Key(originalKey.getRow(), originalKey.getColumnFamily(),
                originalKey.getColumnQualifier(), new Text("A&|||"), originalKey.getTimestamp()),
            input.getTopValue());
        input.next();
      }
    }
  }
  /** Illegal-visibility transform run under a simulated major-compaction environment. */
  public static class IllegalVisCompactionKeyTransformingIterator
      extends IllegalVisKeyTransformingIterator {
    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) throws IOException {
      // Intentionally replace the caller's environment with a majc-scope one.
      env = new MajCIteratorEnvironmentAdapter();
      super.init(source, options, env);
    }
  }
  /** Rewrites every visibility to "badvis" — valid syntax, but a label the test user lacks. */
  public static class BadVisKeyTransformingIterator extends TransformingIterator {
    @Override
    protected PartialKey getKeyPrefix() {
      return PartialKey.ROW_COLFAM_COLQUAL;
    }
    @Override
    protected void transformRange(SortedKeyValueIterator<Key,Value> input, KVBuffer output)
        throws IOException {
      while (input.hasTop()) {
        Key originalKey = input.getTopKey();
        output.append(
            new Key(originalKey.getRow(), originalKey.getColumnFamily(),
                originalKey.getColumnQualifier(), new Text("badvis"), originalKey.getTimestamp()),
            input.getTopValue());
        input.next();
      }
    }
  }
  /** "badvis" transform run under a simulated major-compaction environment. */
  public static class BadVisCompactionKeyTransformingIterator
      extends BadVisKeyTransformingIterator {
    @Override
    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
        IteratorEnvironment env) throws IOException {
      // Intentionally replace the caller's environment with a majc-scope one.
      env = new MajCIteratorEnvironmentAdapter();
      super.init(source, options, env);
    }
  }
  /**
   * Wrapper that always hands out the same Key/Value instances, overwriting them in place on
   * every seek/next; callers that buffer entries must therefore copy them, not keep references.
   */
  public static class ReuseIterator extends WrappingIterator {
    // Reused holders: set(...) mutates these rather than allocating new objects.
    private Key topKey = new Key();
    private Value topValue = new Value();
    @Override
    public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
      ReuseIterator rei = new ReuseIterator();
      rei.setSource(getSource().deepCopy(env));
      return rei;
    }
    @Override
    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
        throws IOException {
      super.seek(range, columnFamilies, inclusive);
      loadTop();
    }
    @Override
    public void next() throws IOException {
      super.next();
      loadTop();
    }
    @Override
    public Key getTopKey() {
      return topKey;
    }
    @Override
    public Value getTopValue() {
      return topValue;
    }
    // Copies the source's current entry into the reused holders.
    private void loadTop() {
      if (hasTop()) {
        topKey.set(super.getTopKey());
        topValue.set(super.getTopValue().get());
      }
    }
  }
  /** Minimal environment whose only defined behavior is reporting the majc scope. */
  private static class MajCIteratorEnvironmentAdapter implements IteratorEnvironment {
    @Override
    public IteratorScope getIteratorScope() {
      return IteratorScope.majc;
    }
  }
}
// ===== Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java =====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.rfile.AbstractRFileTest.TestRFile;
import org.apache.accumulo.core.file.rfile.RFileTest;
import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class IndexedDocIteratorTest {
  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
  // Null byte used as the separator between components in families/qualifiers.
  private static final byte[] nullByte = {0};
  private static IteratorEnvironment env = new DefaultIteratorEnvironment();
  Text[] columnFamilies;
  Text[] otherColumnFamilies;
  // Monotonically increasing doc id shared across tests; reset by cleanup().
  static int docid = 0;
  static String docColfPrefix = "doc";
  static Text indexColf = new Text("index");
  static Text docColf = new Text(docColfPrefix);
  static {
    // Document column family becomes "doc\0type".
    docColf.append(nullByte, 0, 1);
    docColf.append("type".getBytes(), 0, "type".getBytes().length);
  }
  /**
   * Generates an index + document corpus: for each row, {@code numDocsPerRow} docs, each term
   * (column family) indexed with probability {@code hitRatio}. A doc is recorded in {@code docs}
   * (the expected intersection result) when it hits every non-negated term and misses every
   * negated one. Document entries carry a value listing the terms hit plus " docID=...".
   */
  private TreeMap<Key,Value> createSortedMap(float hitRatio, int numRows, int numDocsPerRow,
      Text[] columnFamilies, Text[] otherColumnFamilies, HashSet<Text> docs,
      Text[] negatedColumns) {
    StringBuilder sb = new StringBuilder();
    Value v = new Value();
    TreeMap<Key,Value> map = new TreeMap<>();
    // negateMask[i] is true when columnFamilies[i] is one of the negated terms.
    boolean[] negateMask = new boolean[columnFamilies.length];
    for (int i = 0; i < columnFamilies.length; i++) {
      negateMask[i] = false;
      if (negatedColumns.length > 0) {
        for (Text ng : negatedColumns) {
          if (columnFamilies[i].equals(ng)) {
            negateMask[i] = true;
          }
        }
      }
    }
    for (int i = 0; i < numRows; i++) {
      Text row = new Text(String.format("%06d", i));
      for (int startDocID = docid; docid - startDocID < numDocsPerRow; docid++) {
        sb.setLength(0);
        sb.append("fake doc contents");
        boolean docHits = true;
        // Doc id component: "type\0<10-digit docid>".
        Text doc = new Text("type");
        doc.append(nullByte, 0, 1);
        doc.append(String.format("%010d", docid).getBytes(), 0, 10);
        for (int j = 0; j < columnFamilies.length; j++) {
          if (RANDOM.get().nextFloat() < hitRatio) {
            // Index entry qualifier: "<term>\0<doc>\0stuff".
            Text colq = new Text(columnFamilies[j]);
            colq.append(nullByte, 0, 1);
            colq.append(doc.getBytes(), 0, doc.getLength());
            colq.append(nullByte, 0, 1);
            colq.append("stuff".getBytes(), 0, "stuff".length());
            Key k = new Key(row, indexColf, colq);
            map.put(k, v);
            sb.append(" ");
            sb.append(columnFamilies[j]);
            if (negateMask[j]) {
              // Hit on a negated term disqualifies the doc.
              docHits = false;
            }
          } else {
            if (!negateMask[j]) {
              // Miss on a required term disqualifies the doc.
              docHits = false;
            }
          }
        }
        if (docHits) {
          docs.add(doc);
        }
        // Decoy index entries that the intersection must ignore.
        for (Text cf : otherColumnFamilies) {
          if (RANDOM.get().nextFloat() < hitRatio) {
            Text colq = new Text(cf);
            colq.append(nullByte, 0, 1);
            colq.append(doc.getBytes(), 0, doc.getLength());
            colq.append(nullByte, 0, 1);
            colq.append("stuff".getBytes(), 0, "stuff".length());
            Key k = new Key(row, indexColf, colq);
            map.put(k, v);
            sb.append(" ");
            sb.append(cf);
          }
        }
        sb.append(" docID=").append(doc);
        Key k = new Key(row, docColf, new Text(String.format("%010d", docid).getBytes()));
        map.put(k, new Value(sb.toString()));
      }
    }
    return map;
  }
  // Backing RFile shared by all tests; written per test and closed via cleanup().
  static TestRFile trf = new TestRFile(DefaultConfiguration.getInstance());
  /** Convenience overload of the builder below with no negated columns. */
  private SortedKeyValueIterator<Key,Value> createIteratorStack(float hitRatio, int numRows,
      int numDocsPerRow, Text[] columnFamilies, Text[] otherColumnFamilies, HashSet<Text> docs)
      throws IOException {
    Text[] nullText = new Text[0];
    return createIteratorStack(hitRatio, numRows, numDocsPerRow, columnFamilies,
        otherColumnFamilies, docs, nullText);
  }
  /**
   * Generates a corpus via createSortedMap and writes it to an RFile with two locality groups
   * ("docs" for the document CF, "terms" for the index CF), then returns a reader over it.
   */
  private SortedKeyValueIterator<Key,Value> createIteratorStack(float hitRatio, int numRows,
      int numDocsPerRow, Text[] columnFamilies, Text[] otherColumnFamilies, HashSet<Text> docs,
      Text[] negatedColumns) throws IOException {
    // write a data file
    trf.openWriter(false);
    TreeMap<Key,Value> inMemoryMap = createSortedMap(hitRatio, numRows, numDocsPerRow,
        columnFamilies, otherColumnFamilies, docs, negatedColumns);
    trf.writer.startNewLocalityGroup("docs", RFileTest.newColFamByteSequence(docColf.toString()));
    for (Entry<Key,Value> entry : inMemoryMap.entrySet()) {
      if (entry.getKey().getColumnFamily().equals(docColf)) {
        trf.writer.append(entry.getKey(), entry.getValue());
      }
    }
    trf.writer.startNewLocalityGroup("terms",
        RFileTest.newColFamByteSequence(indexColf.toString()));
    for (Entry<Key,Value> entry : inMemoryMap.entrySet()) {
      if (entry.getKey().getColumnFamily().equals(indexColf)) {
        trf.writer.append(entry.getKey(), entry.getValue());
      }
    }
    trf.closeWriter();
    trf.openReader();
    return trf.reader;
  }
  /** Closes the shared reader and resets the global doc id counter between tests. */
  private static synchronized void cleanup() throws IOException {
    trf.closeReader();
    docid = 0;
  }
  // Scale of the generated corpus: rows x documents per row.
  private static final int NUM_ROWS = 5;
  private static final int NUM_DOCIDS = 200;
@Test
public void test1() throws IOException {
columnFamilies = new Text[2];
columnFamilies[0] = new Text("CC");
columnFamilies[1] = new Text("EEE");
otherColumnFamilies = new Text[4];
otherColumnFamilies[0] = new Text("A");
otherColumnFamilies[1] = new Text("B");
otherColumnFamilies[2] = new Text("D");
otherColumnFamilies[3] = new Text("F");
float hitRatio = 0.5f;
HashSet<Text> docs = new HashSet<>();
SortedKeyValueIterator<Key,Value> source = createIteratorStack(hitRatio, NUM_ROWS, NUM_DOCIDS,
columnFamilies, otherColumnFamilies, docs);
IteratorSetting is = new IteratorSetting(1, IndexedDocIterator.class);
IndexedDocIterator.setColumnFamilies(is, columnFamilies);
IndexedDocIterator.setColfs(is, indexColf.toString(), docColfPrefix);
IndexedDocIterator iter = new IndexedDocIterator();
iter.init(source, is.getOptions(), env);
iter.seek(new Range(), EMPTY_COL_FAMS, false);
int hitCount = 0;
while (iter.hasTop()) {
hitCount++;
Key k = iter.getTopKey();
Value v = iter.getTopValue();
// System.out.println(k.toString());
// System.out.println(iter.getDocID(k));
Text d = IndexedDocIterator.parseDocID(k);
assertTrue(docs.contains(d));
assertTrue(new String(v.get()).endsWith(" docID=" + d));
iter.next();
}
assertEquals(hitCount, docs.size());
cleanup();
}
@Test
public void test2() throws IOException {
columnFamilies = new Text[3];
columnFamilies[0] = new Text("A");
columnFamilies[1] = new Text("E");
columnFamilies[2] = new Text("G");
otherColumnFamilies = new Text[4];
otherColumnFamilies[0] = new Text("B");
otherColumnFamilies[1] = new Text("C");
otherColumnFamilies[2] = new Text("D");
otherColumnFamilies[3] = new Text("F");
float hitRatio = 0.5f;
HashSet<Text> docs = new HashSet<>();
SortedKeyValueIterator<Key,Value> source = createIteratorStack(hitRatio, NUM_ROWS, NUM_DOCIDS,
columnFamilies, otherColumnFamilies, docs);
IteratorSetting is = new IteratorSetting(1, IndexedDocIterator.class);
IndexedDocIterator.setColumnFamilies(is, columnFamilies);
IndexedDocIterator.setColfs(is, indexColf.toString(), docColfPrefix);
IndexedDocIterator iter = new IndexedDocIterator();
iter.init(source, is.getOptions(), env);
iter.seek(new Range(), EMPTY_COL_FAMS, false);
int hitCount = 0;
while (iter.hasTop()) {
hitCount++;
Key k = iter.getTopKey();
Value v = iter.getTopValue();
Text d = IndexedDocIterator.parseDocID(k);
assertTrue(docs.contains(d));
assertTrue(new String(v.get()).endsWith(" docID=" + d));
iter.next();
}
assertEquals(hitCount, docs.size());
cleanup();
}
@Test
public void test3() throws IOException {
columnFamilies = new Text[6];
columnFamilies[0] = new Text("C");
columnFamilies[1] = new Text("E");
columnFamilies[2] = new Text("G");
columnFamilies[3] = new Text("H");
columnFamilies[4] = new Text("I");
columnFamilies[5] = new Text("J");
otherColumnFamilies = new Text[4];
otherColumnFamilies[0] = new Text("A");
otherColumnFamilies[1] = new Text("B");
otherColumnFamilies[2] = new Text("D");
otherColumnFamilies[3] = new Text("F");
float hitRatio = 0.5f;
HashSet<Text> docs = new HashSet<>();
SortedKeyValueIterator<Key,Value> source = createIteratorStack(hitRatio, NUM_ROWS, NUM_DOCIDS,
columnFamilies, otherColumnFamilies, docs);
SortedKeyValueIterator<Key,Value> source2 = createIteratorStack(hitRatio, NUM_ROWS, NUM_DOCIDS,
columnFamilies, otherColumnFamilies, docs);
ArrayList<SortedKeyValueIterator<Key,Value>> sourceIters = new ArrayList<>();
sourceIters.add(source);
sourceIters.add(source2);
MultiIterator mi = new MultiIterator(sourceIters, false);
IteratorSetting is = new IteratorSetting(1, IndexedDocIterator.class);
IndexedDocIterator.setColumnFamilies(is, columnFamilies);
IndexedDocIterator.setColfs(is, indexColf.toString(), docColfPrefix);
IndexedDocIterator iter = new IndexedDocIterator();
iter.init(mi, is.getOptions(), env);
iter.seek(new Range(), EMPTY_COL_FAMS, false);
int hitCount = 0;
while (iter.hasTop()) {
hitCount++;
Key k = iter.getTopKey();
Value v = iter.getTopValue();
Text d = IndexedDocIterator.parseDocID(k);
assertTrue(docs.contains(d));
assertTrue(new String(v.get()).endsWith(" docID=" + d));
iter.next();
}
assertEquals(hitCount, docs.size());
cleanup();
}
@Test
public void test4() throws IOException {
columnFamilies = new Text[3];
boolean[] notFlags = new boolean[3];
columnFamilies[0] = new Text("A");
notFlags[0] = true;
columnFamilies[1] = new Text("E");
notFlags[1] = false;
columnFamilies[2] = new Text("G");
notFlags[2] = true;
Text[] negatedColumns = new Text[2];
negatedColumns[0] = new Text("A");
negatedColumns[1] = new Text("G");
otherColumnFamilies = new Text[4];
otherColumnFamilies[0] = new Text("B");
otherColumnFamilies[1] = new Text("C");
otherColumnFamilies[2] = new Text("D");
otherColumnFamilies[3] = new Text("F");
float hitRatio = 0.5f;
HashSet<Text> docs = new HashSet<>();
SortedKeyValueIterator<Key,Value> source = createIteratorStack(hitRatio, NUM_ROWS, NUM_DOCIDS,
columnFamilies, otherColumnFamilies, docs, negatedColumns);
IteratorSetting is = new IteratorSetting(1, IndexedDocIterator.class);
IndexedDocIterator.setColumnFamilies(is, columnFamilies, notFlags);
IndexedDocIterator.setColfs(is, indexColf.toString(), docColfPrefix);
IndexedDocIterator iter = new IndexedDocIterator();
iter.init(source, is.getOptions(), env);
iter.seek(new Range(), EMPTY_COL_FAMS, false);
int hitCount = 0;
while (iter.hasTop()) {
hitCount++;
Key k = iter.getTopKey();
Value v = iter.getTopValue();
Text d = IndexedDocIterator.parseDocID(k);
assertTrue(docs.contains(d));
assertTrue(new String(v.get()).endsWith(" docID=" + d));
iter.next();
}
assertEquals(hitCount, docs.size());
cleanup();
}
}
// ===== Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/user/VisibilityFilterTest.java =====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class VisibilityFilterTest {
  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
  // CF/CQ markers: GOOD tags entries expected to survive, BAD the ones expected filtered.
  private static final Text BAD = new Text("bad");
  private static final Text GOOD = new Text("good");
  // Visibility expressions: empty, satisfiable, needs-all-three-auths, and invalid syntax.
  private static final Text EMPTY_VIS = new Text("");
  private static final Text GOOD_VIS = new Text("abc|def");
  private static final Text HIDDEN_VIS = new Text("abc&def&ghi");
  private static final Text BAD_VIS = new Text("&");
  private static final Value EMPTY_VALUE = new Value();
private TreeMap<Key,Value> createUnprotectedSource(int numPublic, int numHidden) {
TreeMap<Key,Value> source = new TreeMap<>();
for (int i = 0; i < numPublic; i++) {
source.put(new Key(new Text(String.format("%03d", i)), GOOD, GOOD, EMPTY_VIS), EMPTY_VALUE);
}
for (int i = 0; i < numHidden; i++) {
source.put(new Key(new Text(String.format("%03d", i)), BAD, BAD, GOOD_VIS), EMPTY_VALUE);
}
return source;
}
private TreeMap<Key,Value> createPollutedSource(int numGood, int numBad) {
TreeMap<Key,Value> source = new TreeMap<>();
for (int i = 0; i < numGood; i++) {
source.put(new Key(new Text(String.format("%03d", i)), GOOD, GOOD, GOOD_VIS), EMPTY_VALUE);
}
for (int i = 0; i < numBad; i++) {
source.put(new Key(new Text(String.format("%03d", i)), BAD, BAD, BAD_VIS), EMPTY_VALUE);
}
return source;
}
private TreeMap<Key,Value> createSourceWithHiddenData(int numViewable, int numHidden) {
TreeMap<Key,Value> source = new TreeMap<>();
for (int i = 0; i < numViewable; i++) {
source.put(new Key(new Text(String.format("%03d", i)), GOOD, GOOD, GOOD_VIS), EMPTY_VALUE);
}
for (int i = 0; i < numHidden; i++) {
source.put(new Key(new Text(String.format("%03d", i)), BAD, BAD, HIDDEN_VIS), EMPTY_VALUE);
}
return source;
}
private void verify(TreeMap<Key,Value> source, int expectedSourceSize, Map<String,String> options,
Text expectedCF, Text expectedCQ, Text expectedCV, int expectedFinalCount)
throws IOException {
assertEquals(expectedSourceSize, source.size());
Filter filter = new VisibilityFilter();
filter.init(new SortedMapIterator(source), options, null);
filter.seek(new Range(), EMPTY_COL_FAMS, false);
int count = 0;
while (filter.hasTop()) {
count++;
// System.out.println(DefaultFormatter.formatEntry(
// Collections.singletonMap(filter.getTopKey(),
// filter.getTopValue()).entrySet().iterator().next(),
// false));
assertEquals(expectedCF, filter.getTopKey().getColumnFamily());
assertEquals(expectedCQ, filter.getTopKey().getColumnQualifier());
assertEquals(expectedCV, filter.getTopKey().getColumnVisibility());
filter.next();
}
assertEquals(expectedFinalCount, count);
}
  @Test
  public void testAllowValidLabelsOnly() throws IOException {
    // filterInvalidLabelsOnly: keep entries whose visibility parses, drop the "&" ones.
    IteratorSetting is = new IteratorSetting(1, VisibilityFilter.class);
    VisibilityFilter.filterInvalidLabelsOnly(is, true);
    TreeMap<Key,Value> source = createPollutedSource(1, 2);
    verify(source, 3, is.getOptions(), GOOD, GOOD, GOOD_VIS, 1);
    source = createPollutedSource(30, 500);
    verify(source, 530, is.getOptions(), GOOD, GOOD, GOOD_VIS, 30);
    source = createPollutedSource(1000, 500);
    verify(source, 1500, is.getOptions(), GOOD, GOOD, GOOD_VIS, 1000);
  }
  @Test
  public void testAllowBadLabelsOnly() throws IOException {
    // Negated invalid-label filtering: only the unparseable entries survive.
    IteratorSetting is = new IteratorSetting(1, VisibilityFilter.class);
    VisibilityFilter.setNegate(is, true);
    VisibilityFilter.filterInvalidLabelsOnly(is, true);
    TreeMap<Key,Value> source = createPollutedSource(1, 2);
    verify(source, 3, is.getOptions(), BAD, BAD, BAD_VIS, 2);
    source = createPollutedSource(30, 500);
    verify(source, 530, is.getOptions(), BAD, BAD, BAD_VIS, 500);
    source = createPollutedSource(1000, 500);
    verify(source, 1500, is.getOptions(), BAD, BAD, BAD_VIS, 500);
  }
  @Test
  public void testAllowAuthorizedLabelsOnly() throws IOException {
    // With only "def", "abc|def" is visible but "abc&def&ghi" is not.
    IteratorSetting is = new IteratorSetting(1, VisibilityFilter.class);
    VisibilityFilter.setAuthorizations(is, new Authorizations("def"));
    TreeMap<Key,Value> source = createSourceWithHiddenData(1, 2);
    verify(source, 3, is.getOptions(), GOOD, GOOD, GOOD_VIS, 1);
    source = createSourceWithHiddenData(30, 500);
    verify(source, 530, is.getOptions(), GOOD, GOOD, GOOD_VIS, 30);
    source = createSourceWithHiddenData(1000, 500);
    verify(source, 1500, is.getOptions(), GOOD, GOOD, GOOD_VIS, 1000);
  }
  @Test
  public void testAllowUnauthorizedLabelsOnly() throws IOException {
    // Negated authorization filtering: only entries the user could NOT see survive.
    IteratorSetting is = new IteratorSetting(1, VisibilityFilter.class);
    VisibilityFilter.setNegate(is, true);
    VisibilityFilter.setAuthorizations(is, new Authorizations("def"));
    TreeMap<Key,Value> source = createSourceWithHiddenData(1, 2);
    verify(source, 3, is.getOptions(), BAD, BAD, HIDDEN_VIS, 2);
    source = createSourceWithHiddenData(30, 500);
    verify(source, 530, is.getOptions(), BAD, BAD, HIDDEN_VIS, 500);
    source = createSourceWithHiddenData(1000, 500);
    verify(source, 1500, is.getOptions(), BAD, BAD, HIDDEN_VIS, 500);
  }
  @Test
  public void testNoLabels() throws IOException {
    // With empty auths only empty-visibility entries pass; negating flips that.
    IteratorSetting is = new IteratorSetting(1, VisibilityFilter.class);
    VisibilityFilter.setNegate(is, false);
    VisibilityFilter.setAuthorizations(is, new Authorizations());
    TreeMap<Key,Value> source = createUnprotectedSource(5, 2);
    verify(source, 7, is.getOptions(), GOOD, GOOD, EMPTY_VIS, 5);
    VisibilityFilter.setNegate(is, true);
    verify(source, 7, is.getOptions(), BAD, BAD, GOOD_VIS, 2);
  }
  @Test
  public void testFilterUnauthorizedAndBad() throws IOException {
    /*
     * if not explicitly filtering bad labels, they will still be filtered while validating against
     * authorizations, but it will be very verbose in the logs
     */
    IteratorSetting is = new IteratorSetting(1, VisibilityFilter.class);
    VisibilityFilter.setAuthorizations(is, new Authorizations("def"));
    // Mix one unparseable entry into the hidden-data source.
    TreeMap<Key,Value> source = createSourceWithHiddenData(1, 5);
    for (Entry<Key,Value> entry : createPollutedSource(0, 1).entrySet()) {
      source.put(entry.getKey(), entry.getValue());
    }
    verify(source, 7, is.getOptions(), GOOD, GOOD, GOOD_VIS, 1);
  }
@Test
public void testCommaSeparatedAuthorizations() throws IOException {
  // authorizations may also be supplied as a plain comma-separated option string
  Map<String,String> options = Collections.singletonMap("auths", "x,def,y");
  // {visible rows, hidden rows}: only the visible rows pass the filter
  int[][] cases = {{1, 2}, {30, 500}, {1000, 500}};
  for (int[] c : cases) {
    TreeMap<Key,Value> source = createSourceWithHiddenData(c[0], c[1]);
    verify(source, c[0] + c[1], options, GOOD, GOOD, GOOD_VIS, c[0]);
  }
}
@Test
public void testSerializedAuthorizations() throws IOException {
  // authorizations may also be supplied in their serialized wire form
  Map<String,String> options =
      Collections.singletonMap("auths", new Authorizations("x", "def", "y").serialize());
  // {visible rows, hidden rows}: only the visible rows pass the filter
  int[][] cases = {{1, 2}, {30, 500}, {1000, 500}};
  for (int[] c : cases) {
    TreeMap<Key,Value> source = createSourceWithHiddenData(c[0], c[1]);
    verify(source, c[0] + c[1], options, GOOD, GOOD, GOOD_VIS, c[0]);
  }
}
@Test
public void testStaticConfigurators() {
  IteratorSetting setting = new IteratorSetting(1, VisibilityFilter.class);
  VisibilityFilter.filterInvalidLabelsOnly(setting, false);
  VisibilityFilter.setNegate(setting, true);
  Authorizations auths = new Authorizations("abc", "def");
  VisibilityFilter.setAuthorizations(setting, auths);
  // each static configurator should have written exactly one option entry
  Map<String,String> opts = setting.getOptions();
  assertEquals("false", opts.get("filterInvalid"));
  assertEquals("true", opts.get("negate"));
  assertEquals(auths.serialize(), opts.get("auths"));
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class GrepIteratorTest {

  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();

  // full data set handed to the iterator under test
  SortedMap<Key,Value> input;
  // subset of input expected to survive a grep whose term occurs within "abcdef"
  SortedMap<Key,Value> output;

  @BeforeEach
  public void init() {
    input = new TreeMap<>();
    output = new TreeMap<>();
    // "abcdef" appears in the row, family, qualifier, or value of every expected
    // entry; the delete marker is expected back even though nothing in it matches.
    input.put(new Key("abcdef", "xyz", "xyz", 0), new Value("xyz"));
    output.put(new Key("abcdef", "xyz", "xyz", 0), new Value("xyz"));
    input.put(new Key("bdf", "ace", "xyz", 0), new Value("xyz"));
    input.put(new Key("bdf", "abcdef", "xyz", 0), new Value("xyz"));
    output.put(new Key("bdf", "abcdef", "xyz", 0), new Value("xyz"));
    input.put(new Key("bdf", "xyz", "xyz", 0), new Value("xyz"));
    input.put(new Key("ceg", "xyz", "abcdef", 0), new Value("xyz"));
    output.put(new Key("ceg", "xyz", "abcdef", 0), new Value("xyz"));
    input.put(new Key("ceg", "xyz", "xyz", 0), new Value("xyz"));
    input.put(new Key("dfh", "xyz", "xyz", 0), new Value("abcdef"));
    output.put(new Key("dfh", "xyz", "xyz", 0), new Value("abcdef"));
    input.put(new Key("dfh", "xyz", "xyz", 1), new Value("xyz"));
    Key k = new Key("dfh", "xyz", "xyz", 1);
    k.setDeleted(true);
    input.put(k, new Value("xyz"));
    output.put(k, new Value("xyz"));
  }

  /** Asserts that {@code skvi} returns exactly the entries of {@code map}, in order. */
  public static void checkEntries(SortedKeyValueIterator<Key,Value> skvi, SortedMap<Key,Value> map)
      throws IOException {
    for (Entry<Key,Value> e : map.entrySet()) {
      assertTrue(skvi.hasTop());
      assertEquals(e.getKey(), skvi.getTopKey());
      assertEquals(e.getValue(), skvi.getTopValue());
      skvi.next();
    }
    assertFalse(skvi.hasTop());
  }

  /**
   * Configures {@code gi} to grep for {@code term}, scans all of {@link #input}, and checks the
   * result against {@link #output}. Optionally calls deepCopy after init as a smoke test that
   * copying does not disturb the configured iterator.
   */
  private void grepAndCheck(GrepIterator gi, IteratorSetting is, String term,
      boolean exerciseDeepCopy) throws IOException {
    GrepIterator.setTerm(is, term);
    gi.init(new SortedMapIterator(input), is.getOptions(), null);
    if (exerciseDeepCopy) {
      gi.deepCopy(null);
    }
    gi.seek(new Range(), EMPTY_COL_FAMS, false);
    checkEntries(gi, output);
  }

  @Test
  public void test() throws IOException {
    GrepIterator gi = new GrepIterator();
    IteratorSetting is = new IteratorSetting(1, GrepIterator.class);
    // each term below occurs only within "abcdef", so every grep yields the same output set;
    // the previously triplicated setTerm/init/seek/check sequence now lives in grepAndCheck
    grepAndCheck(gi, is, "ab", false);
    grepAndCheck(gi, is, "cde", true);
    grepAndCheck(gi, is, "def", false);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class IntersectingIteratorTest {

  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
  private static IteratorEnvironment env = new DefaultIteratorEnvironment();
  private static final int NUM_ROWS = 10;
  private static final int NUM_DOCIDS = 1000;

  // docids (column qualifiers) whose term pattern satisfies the current query
  HashSet<Text> docs = new HashSet<>();
  Text[] columnFamilies;
  Text[] negatedColumns;
  Text[] otherColumnFamilies;
  boolean[] notFlags;
  int docid = 0;

  /** Wraps each label in a {@link Text}; replaces the element-by-element array setup. */
  private static Text[] texts(String... labels) {
    Text[] result = new Text[labels.length];
    for (int i = 0; i < labels.length; i++) {
      result[i] = new Text(labels[i]);
    }
    return result;
  }

  /**
   * Builds a term-index shaped map: row -> term (column family) -> docid (column qualifier).
   * Each term independently hits each doc with probability {@code hitRatio}. Docs that hit every
   * non-negated query term and miss every negated one are recorded in {@code docs} as the
   * expected intersection result. {@code otherColumnFamilies} contribute noise terms only.
   */
  private TreeMap<Key,Value> createSortedMap(float hitRatio, int numRows, int numDocsPerRow,
      Text[] columnFamilies, Text[] otherColumnFamilies, HashSet<Text> docs,
      Text[] negatedColumns) {
    Value v = new Value();
    TreeMap<Key,Value> map = new TreeMap<>();
    // mark which of the query terms are negated
    boolean[] negateMask = new boolean[columnFamilies.length];
    for (int i = 0; i < columnFamilies.length; i++) {
      negateMask[i] = false;
      if (negatedColumns.length > 0) {
        for (Text ng : negatedColumns) {
          if (columnFamilies[i].equals(ng)) {
            negateMask[i] = true;
          }
        }
      }
    }
    for (int i = 0; i < numRows; i++) {
      Text row = new Text(String.format("%06d", i));
      for (int startDocID = docid; docid - startDocID < numDocsPerRow; docid++) {
        boolean docHits = true;
        Text doc = new Text(String.format("%010d", docid));
        for (int j = 0; j < columnFamilies.length; j++) {
          if (RANDOM.get().nextFloat() < hitRatio) {
            Key k = new Key(row, columnFamilies[j], doc);
            map.put(k, v);
            if (negateMask[j]) {
              docHits = false;
            }
          } else {
            if (!negateMask[j]) {
              docHits = false;
            }
          }
        }
        if (docHits) {
          docs.add(doc);
        }
        // noise terms that are not part of the query
        for (Text cf : otherColumnFamilies) {
          if (RANDOM.get().nextFloat() < hitRatio) {
            Key k = new Key(row, cf, doc);
            map.put(k, v);
          }
        }
      }
    }
    return map;
  }

  /** Convenience overload for queries with no negated terms. */
  private SortedKeyValueIterator<Key,Value> createIteratorStack(float hitRatio, int numRows,
      int numDocsPerRow, Text[] columnFamilies, Text[] otherColumnFamilies, HashSet<Text> docs) {
    Text[] nullText = new Text[0];
    return createIteratorStack(hitRatio, numRows, numDocsPerRow, columnFamilies,
        otherColumnFamilies, docs, nullText);
  }

  private SortedKeyValueIterator<Key,Value> createIteratorStack(float hitRatio, int numRows,
      int numDocsPerRow, Text[] columnFamilies, Text[] otherColumnFamilies, HashSet<Text> docs,
      Text[] negatedColumns) {
    TreeMap<Key,Value> inMemoryMap = createSortedMap(hitRatio, numRows, numDocsPerRow,
        columnFamilies, otherColumnFamilies, docs, negatedColumns);
    return new SortedMapIterator(inMemoryMap);
  }

  private void cleanup() {
    docid = 0;
  }

  /**
   * Runs an IntersectingIterator configured from {@link #columnFamilies} (and, when non-null,
   * {@code notFlags}) over {@code source} and asserts that exactly the docids recorded in
   * {@link #docs} come back. Replaces the scan/verify loop previously copied into every test.
   * Resets {@link #docid} afterwards.
   */
  private void verifyMatches(SortedKeyValueIterator<Key,Value> source, boolean[] notFlags)
      throws IOException {
    IteratorSetting is = new IteratorSetting(1, IntersectingIterator.class);
    if (notFlags == null) {
      IntersectingIterator.setColumnFamilies(is, columnFamilies);
    } else {
      IntersectingIterator.setColumnFamilies(is, columnFamilies, notFlags);
    }
    IntersectingIterator iter = new IntersectingIterator();
    iter.init(source, is.getOptions(), env);
    iter.seek(new Range(), EMPTY_COL_FAMS, false);
    int hitCount = 0;
    while (iter.hasTop()) {
      hitCount++;
      assertTrue(docs.contains(iter.getTopKey().getColumnQualifier()));
      iter.next();
    }
    // expected value first, per assertEquals convention
    assertEquals(docs.size(), hitCount);
    cleanup();
  }

  @Test
  public void test1() throws IOException {
    columnFamilies = texts("C", "E");
    otherColumnFamilies = texts("A", "B", "D", "F");
    verifyMatches(createIteratorStack(0.5f, NUM_ROWS, NUM_DOCIDS, columnFamilies,
        otherColumnFamilies, docs), null);
  }

  @Test
  public void test2() throws IOException {
    columnFamilies = texts("A", "E", "G");
    otherColumnFamilies = texts("B", "C", "D", "F");
    verifyMatches(createIteratorStack(0.5f, NUM_ROWS, NUM_DOCIDS, columnFamilies,
        otherColumnFamilies, docs), null);
  }

  @Test
  public void test3() throws IOException {
    // intersect over two independently generated sources merged by a MultiIterator;
    // docids keep incrementing across the two sources, so the doc sets are disjoint
    columnFamilies = texts("C", "E", "G", "H", "I", "J");
    otherColumnFamilies = texts("A", "B", "D", "F");
    ArrayList<SortedKeyValueIterator<Key,Value>> sourceIters = new ArrayList<>();
    sourceIters.add(createIteratorStack(0.5f, NUM_ROWS, NUM_DOCIDS, columnFamilies,
        otherColumnFamilies, docs));
    sourceIters.add(createIteratorStack(0.5f, NUM_ROWS, NUM_DOCIDS, columnFamilies,
        otherColumnFamilies, docs));
    verifyMatches(new MultiIterator(sourceIters, false), null);
  }

  @Test
  public void test4() throws IOException {
    // A and G are negated: matching docs must contain E and neither A nor G
    columnFamilies = texts("A", "E", "G");
    notFlags = new boolean[] {true, false, true};
    negatedColumns = texts("A", "G");
    otherColumnFamilies = texts("B", "C", "D", "F");
    verifyMatches(createIteratorStack(0.5f, NUM_ROWS, NUM_DOCIDS, columnFamilies,
        otherColumnFamilies, docs, negatedColumns), notFlags);
  }

  @Test
  public void test6() throws IOException {
    // degenerate intersection over a single term
    columnFamilies = texts("C");
    otherColumnFamilies = texts("A", "B", "D", "F");
    verifyMatches(createIteratorStack(0.5f, NUM_ROWS, NUM_DOCIDS, columnFamilies,
        otherColumnFamilies, docs), null);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.user;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.DefaultIteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ColumnSliceFilterTest {

  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
  private static final SortedMap<Key,Value> TEST_DATA = new TreeMap<>();
  // The filter slices on the column qualifier (a date string here). Full scan order of
  // TEST_DATA is: KEY_2, KEY_1 (row boo1), then KEY_3, KEY_6, KEY_4, KEY_5 (row boo2).
  private static final Key KEY_1 = newKeyValue(TEST_DATA, "boo1", "yup", "20080201", "dog");
  private static final Key KEY_2 = newKeyValue(TEST_DATA, "boo1", "yap", "20080202", "cat");
  private static final Key KEY_3 = newKeyValue(TEST_DATA, "boo2", "yap", "20080203", "hamster");
  private static final Key KEY_4 = newKeyValue(TEST_DATA, "boo2", "yop", "20080204", "lion");
  private static final Key KEY_5 = newKeyValue(TEST_DATA, "boo2", "yup", "20080206", "tiger");
  private static final Key KEY_6 = newKeyValue(TEST_DATA, "boo2", "yip", "20080203", "tiger");

  private IteratorEnvironment iteratorEnvironment;
  private ColumnSliceFilter columnSliceFilter = new ColumnSliceFilter();
  private IteratorSetting is;

  /** Adds a (row, family, qualifier) -> value entry to {@code tm} and returns its key. */
  private static Key newKeyValue(SortedMap<Key,Value> tm, String row, String cf, String cq,
      String val) {
    Key k = newKey(row, cf, cq);
    tm.put(k, new Value(val));
    return k;
  }

  private static Key newKey(String row, String cf, String cq) {
    return new Key(new Text(row), new Text(cf), new Text(cq));
  }

  @BeforeEach
  public void setUp() {
    columnSliceFilter.describeOptions();
    iteratorEnvironment = new DefaultIteratorEnvironment();
    is = new IteratorSetting(1, ColumnSliceFilter.class);
  }

  /**
   * Initializes the filter from the current {@link #is} options, scans all of
   * {@link #TEST_DATA}, and asserts that exactly {@code expectedKeys} come back, in order.
   * Replaces the assertTrue/assertEquals/next chains previously copied into every test;
   * also fixes the swapped expected/actual arguments the originals passed to assertEquals.
   */
  private void verifySlice(boolean colFamsInclusive, Key... expectedKeys) throws IOException {
    columnSliceFilter.init(new SortedMapIterator(TEST_DATA), is.getOptions(), iteratorEnvironment);
    columnSliceFilter.seek(new Range(), EMPTY_COL_FAMS, colFamsInclusive);
    for (Key expected : expectedKeys) {
      assertTrue(columnSliceFilter.hasTop());
      assertEquals(expected, columnSliceFilter.getTopKey());
      columnSliceFilter.next();
    }
    assertFalse(columnSliceFilter.hasTop());
  }

  @Test
  public void testBasic() throws IOException {
    // default bounds are start-inclusive, end-exclusive
    ColumnSliceFilter.setSlice(is, "20080202", "20080204");
    assertTrue(columnSliceFilter.validateOptions(is.getOptions()));
    verifySlice(true, KEY_2, KEY_3, KEY_6);
  }

  @Test
  public void testBothInclusive() throws IOException {
    ColumnSliceFilter.setSlice(is, "20080202", true, "20080204", true);
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_2, KEY_3, KEY_6, KEY_4);
  }

  @Test
  public void testBothExclusive() throws IOException {
    ColumnSliceFilter.setSlice(is, "20080202", false, "20080204", false);
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_3, KEY_6);
  }

  @Test
  public void testStartExclusiveEndInclusive() throws IOException {
    ColumnSliceFilter.setSlice(is, "20080202", false, "20080204", true);
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_3, KEY_6, KEY_4);
  }

  @Test
  public void testNullStart() throws IOException {
    // a null start leaves the slice unbounded below
    ColumnSliceFilter.setSlice(is, null, "20080204");
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_2, KEY_1, KEY_3, KEY_6);
  }

  @Test
  public void testNullEnd() throws IOException {
    // a null end leaves the slice unbounded above
    ColumnSliceFilter.setSlice(is, "20080202", null);
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_2, KEY_3, KEY_6, KEY_4, KEY_5);
  }

  @Test
  public void testBothNull() throws IOException {
    // a fully unbounded slice passes every entry through
    ColumnSliceFilter.setSlice(is, null, null);
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_2, KEY_1, KEY_3, KEY_6, KEY_4, KEY_5);
  }

  @Test
  public void testStartAfterEnd() {
    assertThrows(IllegalArgumentException.class,
        () -> ColumnSliceFilter.setSlice(is, "20080204", "20080202"));
  }

  @Test
  public void testStartEqualToEndStartInclusiveEndExclusive() {
    assertThrows(IllegalArgumentException.class,
        () -> ColumnSliceFilter.setSlice(is, "20080202", "20080202"));
  }

  @Test
  public void testStartEqualToEndStartExclusiveEndInclusive() {
    assertThrows(IllegalArgumentException.class,
        () -> ColumnSliceFilter.setSlice(is, "20080202", false, "20080202", true));
  }

  @Test
  public void testStartEqualToEndBothInclusive() throws IOException {
    ColumnSliceFilter.setSlice(is, "20080202", true, "20080202", true);
    columnSliceFilter.validateOptions(is.getOptions());
    verifySlice(false, KEY_2);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.HashSet;
import java.util.TreeMap;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.accumulo.core.iteratorsImpl.system.TimeSettingIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests {@link TimeSettingIterator}, which rewrites the timestamp of every key it emits to a
 * single fixed value supplied at construction time while leaving the keys' relative order as
 * produced by the underlying source.
 */
public class TimeSettingIteratorTest {

  @Test
  public void test1() throws Exception {
    // Three rows; row r1 has three versions with widely differing timestamps. The iterator
    // below rewrites every surfaced timestamp to 50, so ranges are expressed against ts=50.
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    tm1.put(new Key("r0", "cf1", "cq1", 9L), new Value("v0"));
    tm1.put(new Key("r1", "cf1", "cq1", Long.MAX_VALUE), new Value("v1"));
    tm1.put(new Key("r1", "cf1", "cq1", 90L), new Value("v2"));
    tm1.put(new Key("r1", "cf1", "cq1", 0L), new Value("v3"));
    tm1.put(new Key("r2", "cf1", "cq1", 6L), new Value("v4"));
    TimeSettingIterator tsi = new TimeSettingIterator(new SortedMapIterator(tm1), 50);
    // A range pinned to exactly (r1, cf1, cq1, ts=50) should surface all three r1 versions,
    // each reporting timestamp 50, in their original relative order: v1, v2, v3.
    tsi.seek(
        new Range(new Key("r1", "cf1", "cq1", 50L), true, new Key("r1", "cf1", "cq1", 50L), true),
        new HashSet<>(), false);
    assertTrue(tsi.hasTop());
    assertEquals(new Key("r1", "cf1", "cq1", 50L), tsi.getTopKey());
    assertEquals("v1", tsi.getTopValue().toString());
    tsi.next();
    assertTrue(tsi.hasTop());
    assertEquals(new Key("r1", "cf1", "cq1", 50L), tsi.getTopKey());
    assertEquals("v2", tsi.getTopValue().toString());
    tsi.next();
    assertTrue(tsi.hasTop());
    assertEquals(new Key("r1", "cf1", "cq1", 50L), tsi.getTopKey());
    assertEquals("v3", tsi.getTopValue().toString());
    tsi.next();
    assertFalse(tsi.hasTop());
    // Everything strictly after the r1 key: only r2's v4 remains.
    tsi.seek(new Range(new Key("r1", "cf1", "cq1", 50L), false, null, true), new HashSet<>(),
        false);
    assertTrue(tsi.hasTop());
    assertEquals(new Key("r2", "cf1", "cq1", 50L), tsi.getTopKey());
    assertEquals("v4", tsi.getTopValue().toString());
    tsi.next();
    assertFalse(tsi.hasTop());
    // Everything strictly before the r1 key: only r0's v0 remains.
    tsi.seek(new Range(null, true, new Key("r1", "cf1", "cq1", 50L), false), new HashSet<>(),
        false);
    assertTrue(tsi.hasTop());
    assertEquals(new Key("r0", "cf1", "cq1", 50L), tsi.getTopKey());
    assertEquals("v0", tsi.getTopValue().toString());
    tsi.next();
    assertFalse(tsi.hasTop());
    // A range whose start (ts=51) sorts after its end (ts=50) is empty.
    tsi.seek(
        new Range(new Key("r1", "cf1", "cq1", 51L), true, new Key("r1", "cf1", "cq1", 50L), false),
        new HashSet<>(), false);
    assertFalse(tsi.hasTop());
  }

  @Test
  public void testAvoidKeyCopy() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    final Key k = new Key("r0", "cf1", "cq1", 9L);
    tm1.put(k, new Value("v0"));
    TimeSettingIterator tsi = new TimeSettingIterator(new SortedMapIterator(tm1), 50);
    tsi.seek(new Range(), new HashSet<>(), false);
    assertTrue(tsi.hasTop());
    final Key topKey = tsi.getTopKey();
    // the iterator must set the time on the source's Key instance in place rather than
    // allocating a copy; assertSame checks object identity, not just equality
    assertSame(k, topKey, "Expected the topKey to be the same object");
    assertEquals(new Key("r0", "cf1", "cq1", 50L), topKey);
    assertEquals("v0", tsi.getTopValue().toString());
    tsi.next();
    assertFalse(tsi.hasTop());
  }

  @Test
  public void testEndKeyRangeAtMinLongValue() throws IOException {
    Text row = new Text("a");
    Text colf = new Text("b");
    Text colq = new Text("c");
    Text cv = new Text();
    // Run the same scenario with both an inclusive and an exclusive end key: either way,
    // both entries (delete marker "00" first, then regular "11") must come back with their
    // timestamps rewritten from Long.MIN_VALUE to 111.
    for (boolean inclusiveEndRange : new boolean[] {true, false}) {
      TreeMap<Key,Value> sources = new TreeMap<>();
      // delete-flagged entry at the minimum possible timestamp
      sources.put(new Key(row.getBytes(), colf.getBytes(), colq.getBytes(), cv.getBytes(),
          Long.MIN_VALUE, true), new Value("00"));
      sources.put(
          new Key(row.getBytes(), colf.getBytes(), colq.getBytes(), cv.getBytes(), Long.MIN_VALUE),
          new Value("11"));
      TimeSettingIterator it = new TimeSettingIterator(new SortedMapIterator(sources), 111L);
      IteratorSetting is = new IteratorSetting(1, TimeSettingIterator.class);
      it.init(null, is.getOptions(), null);
      Key startKey = new Key();
      Key endKey = new Key(row, colf, colq, cv, Long.MIN_VALUE);
      Range testRange = new Range(startKey, false, endKey, inclusiveEndRange);
      it.seek(testRange, new HashSet<>(), false);
      assertTrue(it.hasTop());
      assertEquals(it.getTopValue(), new Value("00"));
      assertEquals(111L, it.getTopKey().getTimestamp());
      it.next();
      assertTrue(it.hasTop());
      assertEquals(it.getTopValue(), new Value("11"));
      assertEquals(111L, it.getTopKey().getTimestamp());
      it.next();
      assertFalse(it.hasTop());
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.DeletingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.DeletingIterator.Behavior;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@code DeletingIterator}: suppression of entries masked by delete markers, optional
 * propagation of the markers themselves, seek behavior around markers, and the FAIL behavior.
 */
public class DeletingIteratorTest {
  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();

  /**
   * Loads rows 000 and 001 with five "old" versions (ts 0-4) and five "new" versions (ts 6-10),
   * plus a delete marker at ts 5 in row 000 only. Without propagation the marker and the old
   * entries it masks are suppressed; with propagation the marker itself is passed through.
   */
  @Test
  public void test1() throws IOException {
    Text colf = new Text("a");
    Text colq = new Text("b");
    Value dvOld = new Value("old");
    Value dvDel = new Value("old");
    Value dvNew = new Value("new");
    TreeMap<Key,Value> tm = new TreeMap<>();
    Key k;
    // five "old" versions (ts 0-4) in each of the two rows
    for (int i = 0; i < 2; i++) {
      for (long j = 0; j < 5; j++) {
        k = new Key(new Text(String.format("%03d", i)), colf, colq, j);
        tm.put(k, dvOld);
      }
    }
    // a delete marker at ts 5 in row 000 only
    k = new Key(new Text(String.format("%03d", 0)), colf, colq, 5);
    k.setDeleted(true);
    tm.put(k, dvDel);
    // five "new" versions (ts 6-10, above the marker) in each row
    for (int i = 0; i < 2; i++) {
      for (long j = 6; j < 11; j++) {
        k = new Key(new Text(String.format("%03d", i)), colf, colq, j);
        tm.put(k, dvNew);
      }
    }
    assertEquals(21, tm.size(), "Initial size was " + tm.size());

    Text checkRow = new Text("000");

    // without propagation: the marker and the 5 old entries it masks disappear -> 15 remain
    SortedKeyValueIterator<Key,Value> noProp =
        DeletingIterator.wrap(new SortedMapIterator(tm), false, Behavior.PROCESS);
    noProp.seek(new Range(), EMPTY_COL_FAMS, false);
    TreeMap<Key,Value> tmOut = new TreeMap<>();
    while (noProp.hasTop()) {
      tmOut.put(noProp.getTopKey(), noProp.getTopValue());
      noProp.next();
    }
    assertEquals(15, tmOut.size(), "size after no propagation was " + tmOut.size());
    for (Entry<Key,Value> e : tmOut.entrySet()) {
      if (e.getKey().getRow().equals(checkRow)) {
        // only the "new" versions survive in the row containing the delete
        assertEquals("new", e.getValue().toString());
      }
    }

    // with propagation: the delete marker itself is also emitted -> 16 entries
    SortedKeyValueIterator<Key,Value> prop =
        DeletingIterator.wrap(new SortedMapIterator(tm), true, Behavior.PROCESS);
    prop.seek(new Range(), EMPTY_COL_FAMS, false);
    tmOut = new TreeMap<>();
    while (prop.hasTop()) {
      tmOut.put(prop.getTopKey(), prop.getTopValue());
      prop.next();
    }
    assertEquals(16, tmOut.size(), "size after propagation was " + tmOut.size());
    for (Entry<Key,Value> e : tmOut.entrySet()) {
      if (e.getKey().getRow().equals(checkRow)) {
        if (e.getKey().isDeleted()) {
          assertEquals("old", e.getValue().toString());
        } else {
          assertEquals("new", e.getValue().toString());
        }
      }
    }
  }

  /** Seek behavior relative to a delete marker at ts 2: before, at, and past the marker. */
  @Test
  public void test2() throws IOException {
    TreeMap<Key,Value> tm = new TreeMap<>();
    newKeyValue(tm, "r000", 4, false, "v4");
    newKeyValue(tm, "r000", 3, false, "v3");
    newKeyValue(tm, "r000", 2, true, "v2");
    newKeyValue(tm, "r000", 1, false, "v1");
    SortedKeyValueIterator<Key,Value> it =
        DeletingIterator.wrap(new SortedMapIterator(tm), false, Behavior.PROCESS);
    // SEEK two keys before delete
    it.seek(newRange("r000", 4), EMPTY_COL_FAMS, false);
    assertTrue(it.hasTop());
    assertEquals(newKey("r000", 4), it.getTopKey());
    assertEquals("v4", it.getTopValue().toString());
    it.next();
    assertTrue(it.hasTop());
    assertEquals(newKey("r000", 3), it.getTopKey());
    assertEquals("v3", it.getTopValue().toString());
    it.next();
    assertFalse(it.hasTop());
    // SEEK passed delete
    it.seek(newRange("r000", 1), EMPTY_COL_FAMS, false);
    assertFalse(it.hasTop());
    // SEEK to delete
    it.seek(newRange("r000", 2), EMPTY_COL_FAMS, false);
    assertFalse(it.hasTop());
    // SEEK right before delete
    it.seek(newRange("r000", 3), EMPTY_COL_FAMS, false);
    assertTrue(it.hasTop());
    assertEquals(newKey("r000", 3), it.getTopKey());
    assertEquals("v3", it.getTopValue().toString());
    it.next();
    assertFalse(it.hasTop());
  }

  /** A delete with the same timestamp as an existing key must mask that key. */
  @Test
  public void test3() throws IOException {
    TreeMap<Key,Value> tm = new TreeMap<>();
    newKeyValue(tm, "r000", 3, false, "v3");
    newKeyValue(tm, "r000", 2, false, "v2");
    newKeyValue(tm, "r000", 2, true, "");
    newKeyValue(tm, "r000", 1, false, "v1");
    SortedKeyValueIterator<Key,Value> it =
        DeletingIterator.wrap(new SortedMapIterator(tm), false, Behavior.PROCESS);
    it.seek(new Range(), EMPTY_COL_FAMS, false);
    assertTrue(it.hasTop());
    assertEquals(newKey("r000", 3), it.getTopKey());
    assertEquals("v3", it.getTopValue().toString());
    it.next();
    assertFalse(it.hasTop());
    it.seek(newRange("r000", 2), EMPTY_COL_FAMS, false);
    assertFalse(it.hasTop());
  }

  /** Range inclusiveness: an exclusive start key must skip the entry exactly at the start. */
  @Test
  public void test4() throws IOException {
    TreeMap<Key,Value> tm = new TreeMap<>();
    newKeyValue(tm, "r000", 3, false, "v3");
    newKeyValue(tm, "r000", 2, false, "v2");
    newKeyValue(tm, "r000", 2, true, "");
    newKeyValue(tm, "r000", 1, false, "v1");
    SortedKeyValueIterator<Key,Value> it =
        DeletingIterator.wrap(new SortedMapIterator(tm), false, Behavior.PROCESS);
    it.seek(newRange("r000", 3), EMPTY_COL_FAMS, false);
    assertTrue(it.hasTop());
    assertEquals(newKey("r000", 3), it.getTopKey());
    assertEquals("v3", it.getTopValue().toString());
    it.next();
    assertFalse(it.hasTop());
    it.seek(newRange("r000", 3, false), EMPTY_COL_FAMS, false);
    assertFalse(it.hasTop());
  }

  /** Behavior.FAIL: reading the top key while positioned on a delete marker must throw. */
  @Test
  public void testFail() throws IOException {
    TreeMap<Key,Value> tm = new TreeMap<>();
    newKeyValue(tm, "r000", 3, false, "v3");
    newKeyValue(tm, "r000", 2, false, "v2");
    newKeyValue(tm, "r000", 2, true, "");
    newKeyValue(tm, "r000", 1, false, "v1");
    SortedKeyValueIterator<Key,Value> it =
        DeletingIterator.wrap(new SortedMapIterator(tm), false, Behavior.FAIL);
    it.seek(new Range(), EMPTY_COL_FAMS, false);
    // first entry should pass
    it.getTopKey();
    it.next();
    // second entry should fail due to delete
    assertThrows(IllegalStateException.class, it::getTopKey);
    it.next();
    // third entry should pass
    it.getTopKey();
    it.next();
    // fourth entry should pass
    it.getTopKey();
    it.next();
  }

  /** Range from (row, ts) to +infinity, start key optionally inclusive. */
  private Range newRange(String row, long ts, boolean inclusive) {
    return new Range(newKey(row, ts), inclusive, null, true);
  }

  /** Range from (row, ts) to +infinity with an inclusive start key. */
  private Range newRange(String row, long ts) {
    return newRange(row, ts, true);
  }

  /** Key with only a row and timestamp set. */
  private Key newKey(String row, long ts) {
    return new Key(new Text(row), ts);
  }

  /** Adds an entry, optionally marked as a delete, to the map. */
  private void newKeyValue(TreeMap<Key,Value> tm, String row, long ts, boolean deleted,
      String val) {
    Key k = newKey(row, ts);
    k.setDeleted(deleted);
    tm.put(k, new Value(val));
  }
}
| 9,514 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.WrappingIterator;
import org.apache.accumulo.core.iterators.YieldCallback;
import org.apache.accumulo.core.iteratorsImpl.system.InterruptibleIterator;
import org.apache.accumulo.core.iteratorsImpl.system.IterationInterruptedException;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator.DataSource;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@code SourceSwitchingIterator}, which transparently switches to a new underlying
 * data source mid-scan. Covers immediate switching, row-boundary switching, switching on initial
 * seek, deep copies, interrupt flags, and yielding.
 */
public class SourceSwitchingIteratorTest {
  /** Builds a key from string row/cf/cq and a timestamp. */
  Key newKey(String row, String cf, String cq, long time) {
    return new Key(new Text(row), new Text(cf), new Text(cq), time);
  }

  /** Inserts one entry into the backing map. */
  void put(TreeMap<Key,Value> tm, String row, String cf, String cq, long time, Value val) {
    tm.put(newKey(row, cf, cq, time), val);
  }

  /** Convenience overload taking the value as a string. */
  void put(TreeMap<Key,Value> tm, String row, String cf, String cq, long time, String val) {
    put(tm, row, cf, cq, time, new Value(val));
  }

  /** Asserts the iterator's top entry matches the expectation, optionally advancing it after. */
  private void testAndCallNext(SortedKeyValueIterator<Key,Value> rdi, String row, String cf,
      String cq, long time, String val, boolean callNext) throws Exception {
    assertTrue(rdi.hasTop());
    assertEquals(newKey(row, cf, cq, time), rdi.getTopKey());
    assertEquals(val, rdi.getTopValue().toString());
    if (callNext) {
      rdi.next();
    }
  }

  /**
   * A DataSource backed by an in-memory iterator. {@code next} being non-null signals a newer
   * source is available; deep copies register themselves in the shared {@code copies} list so
   * {@link #setNext(TestDataSource)} can fan the switch out to them.
   */
  class TestDataSource implements DataSource {
    // the replacement source, or null while this one is current
    DataSource next;
    SortedKeyValueIterator<Key,Value> iter;
    // all deep copies sharing this source (includes this instance)
    List<TestDataSource> copies = new ArrayList<>();
    // interrupt flag to install on iter when iterator() is called
    AtomicBoolean iflag;

    TestDataSource(SortedKeyValueIterator<Key,Value> iter) {
      this(iter, new ArrayList<>());
    }

    public TestDataSource(SortedKeyValueIterator<Key,Value> iter, List<TestDataSource> copies) {
      this.iter = iter;
      this.copies = copies;
      copies.add(this);
    }

    @Override
    public DataSource getNewDataSource() {
      return next;
    }

    @Override
    public boolean isCurrent() {
      return next == null;
    }

    @Override
    public SortedKeyValueIterator<Key,Value> iterator() {
      // propagate any interrupt flag set via setInterruptFlag before handing out the iterator
      if (iflag != null) {
        ((InterruptibleIterator) iter).setInterruptFlag(iflag);
      }
      return iter;
    }

    @Override
    public DataSource getDeepCopyDataSource(IteratorEnvironment env) {
      return new TestDataSource(iter.deepCopy(env), copies);
    }

    /** Marks {@code next} as the replacement for this source and every registered copy. */
    void setNext(TestDataSource next) {
      this.next = next;
      for (TestDataSource tds : copies) {
        if (tds != this) {
          // each copy gets its own deep copy of the new source's iterator
          tds.next = new TestDataSource(next.iter.deepCopy(null), next.copies);
        }
      }
    }

    @Override
    public void setInterruptFlag(AtomicBoolean flag) {
      this.iflag = flag;
    }
  }

  /** Basic iteration with no source switch. */
  @Test
  public void test1() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 5, "v2");
    put(tm1, "r2", "cf1", "cq1", 5, "v3");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    TestDataSource tds = new TestDataSource(smi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds);
    ssi.seek(new Range(), new ArrayList<>(), false);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 5, "v1", true);
    testAndCallNext(ssi, "r1", "cf1", "cq3", 5, "v2", true);
    testAndCallNext(ssi, "r2", "cf1", "cq1", 5, "v3", true);
    assertFalse(ssi.hasTop());
  }

  /**
   * Switch mid-scan without row alignment: entries already returned keep their old values, and
   * the switch takes effect as iteration continues (v6 comes from the new source).
   */
  @Test
  public void test2() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 5, "v2");
    put(tm1, "r2", "cf1", "cq1", 5, "v3");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    TestDataSource tds = new TestDataSource(smi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds);
    ssi.seek(new Range(), new ArrayList<>(), false);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 5, "v1", true);
    TreeMap<Key,Value> tm2 = new TreeMap<>();
    put(tm2, "r1", "cf1", "cq1", 5, "v4");
    put(tm2, "r1", "cf1", "cq3", 5, "v5");
    put(tm2, "r2", "cf1", "cq1", 5, "v6");
    SortedMapIterator smi2 = new SortedMapIterator(tm2);
    tds.next = new TestDataSource(smi2);
    testAndCallNext(ssi, "r1", "cf1", "cq3", 5, "v2", true);
    testAndCallNext(ssi, "r2", "cf1", "cq1", 5, "v6", true);
    assertFalse(ssi.hasTop());
  }

  /** With onlySwitchAfterRow=true, the switch is deferred until the current row completes. */
  @Test
  public void test3() throws Exception {
    // test switching after a row
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq2", 5, "v2");
    put(tm1, "r1", "cf1", "cq3", 5, "v3");
    put(tm1, "r1", "cf1", "cq4", 5, "v4");
    put(tm1, "r3", "cf1", "cq1", 5, "v5");
    put(tm1, "r3", "cf1", "cq2", 5, "v6");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    TestDataSource tds = new TestDataSource(smi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds, true);
    ssi.seek(new Range(), new ArrayList<>(), false);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 5, "v1", true);
    TreeMap<Key,Value> tm2 = new TreeMap<>(tm1);
    put(tm2, "r1", "cf1", "cq5", 5, "v7"); // should not see this because it should not switch until
                                           // the row is finished
    put(tm2, "r2", "cf1", "cq1", 5, "v8"); // should see this new row after it switches
    // setup a new data source, but it should not switch until the current row is finished
    SortedMapIterator smi2 = new SortedMapIterator(tm2);
    tds.next = new TestDataSource(smi2);
    testAndCallNext(ssi, "r1", "cf1", "cq2", 5, "v2", true);
    testAndCallNext(ssi, "r1", "cf1", "cq3", 5, "v3", true);
    testAndCallNext(ssi, "r1", "cf1", "cq4", 5, "v4", true);
    testAndCallNext(ssi, "r2", "cf1", "cq1", 5, "v8", true);
    testAndCallNext(ssi, "r3", "cf1", "cq1", 5, "v5", true);
    testAndCallNext(ssi, "r3", "cf1", "cq2", 5, "v6", true);
  }

  /** A source set before the first seek must be picked up by that seek. */
  @Test
  public void test4() throws Exception {
    // ensure switch is done on initial seek
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq2", 5, "v2");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    TestDataSource tds = new TestDataSource(smi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds, false);
    TreeMap<Key,Value> tm2 = new TreeMap<>();
    put(tm2, "r1", "cf1", "cq1", 6, "v3");
    put(tm2, "r1", "cf1", "cq2", 6, "v4");
    SortedMapIterator smi2 = new SortedMapIterator(tm2);
    tds.next = new TestDataSource(smi2);
    ssi.seek(new Range(), new ArrayList<>(), false);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 6, "v3", true);
    testAndCallNext(ssi, "r1", "cf1", "cq2", 6, "v4", true);
  }

  /** switchNow() must apply the new source to the iterator and to its deep copies. */
  @Test
  public void test5() throws Exception {
    // ensure switchNow() works w/ deepCopy()
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq2", 5, "v2");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    TestDataSource tds = new TestDataSource(smi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds, false);
    SortedKeyValueIterator<Key,Value> dc1 = ssi.deepCopy(null);
    TreeMap<Key,Value> tm2 = new TreeMap<>();
    put(tm2, "r1", "cf1", "cq1", 6, "v3");
    put(tm2, "r2", "cf1", "cq2", 6, "v4");
    SortedMapIterator smi2 = new SortedMapIterator(tm2);
    TestDataSource tds2 = new TestDataSource(smi2);
    tds.setNext(tds2);
    ssi.switchNow();
    ssi.seek(new Range("r1"), new ArrayList<>(), false);
    dc1.seek(new Range("r2"), new ArrayList<>(), false);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 6, "v3", true);
    assertFalse(ssi.hasTop());
    testAndCallNext(dc1, "r2", "cf1", "cq2", 6, "v4", true);
    assertFalse(dc1.hasTop());
  }

  /** The interrupt flag must propagate to the data source and abort subsequent seeks. */
  @Test
  public void testSetInterrupt() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    TestDataSource tds = new TestDataSource(smi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds, false);
    AtomicBoolean flag = new AtomicBoolean();
    ssi.setInterruptFlag(flag);
    assertSame(flag, tds.iflag);
    final Range r1Range = new Range("r1");
    final List<ByteSequence> columnFamilies = List.of();
    ssi.seek(r1Range, columnFamilies, false);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 5, "v1", true);
    assertFalse(ssi.hasTop());
    flag.set(true);
    assertThrows(IterationInterruptedException.class,
        () -> ssi.seek(r1Range, columnFamilies, false));
  }

  /**
   * Drains pending yields: re-seeks from each yield position (exclusive) until the iterator no
   * longer yields, returning the narrowed range. Throws if a yield position escapes the range.
   */
  private Range doYield(Range r, SourceSwitchingIterator ssi, YieldCallback<Key> yield)
      throws IOException {
    while (yield.hasYielded()) {
      Key yieldPosition = yield.getPositionAndReset();
      if (!r.contains(yieldPosition)) {
        throw new IOException("Underlying iterator yielded to a position outside of its range: "
            + yieldPosition + " not in " + r);
      }
      r = new Range(yieldPosition, false, null, r.isEndKeyInclusive());
      ssi.seek(r, new ArrayList<>(), false);
    }
    return r;
  }

  /** All entries must still be returned, in order, when the source yields periodically. */
  @Test
  public void testYield() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 5, "v2");
    put(tm1, "r2", "cf1", "cq1", 5, "v3");
    SortedMapIterator smi = new SortedMapIterator(tm1);
    YieldingIterator ymi = new YieldingIterator(smi);
    TestDataSource tds = new TestDataSource(ymi);
    SourceSwitchingIterator ssi = new SourceSwitchingIterator(tds);
    YieldCallback<Key> yield = new YieldCallback<>();
    ssi.enableYielding(yield);
    Range r = new Range();
    ssi.seek(r, new ArrayList<>(), false);
    r = doYield(r, ssi, yield);
    testAndCallNext(ssi, "r1", "cf1", "cq1", 5, "v1", true);
    r = doYield(r, ssi, yield);
    testAndCallNext(ssi, "r1", "cf1", "cq3", 5, "v2", true);
    r = doYield(r, ssi, yield);
    testAndCallNext(ssi, "r2", "cf1", "cq1", 5, "v3", true);
    r = doYield(r, ssi, yield);
    assertFalse(ssi.hasTop());
  }

  /**
   * This iterator which implements yielding will yield after every other next and every other seek
   * call.
   */
  // NOTE(review): these toggles live on the enclosing test instance, so they are shared by all
  // YieldingIterator instances (including deep copies) created within one test
  private final AtomicBoolean yieldNextKey = new AtomicBoolean(false);
  private final AtomicBoolean yieldSeekKey = new AtomicBoolean(false);

  public class YieldingIterator extends WrappingIterator {
    private Optional<YieldCallback<Key>> yield = Optional.empty();

    public YieldingIterator(SortedKeyValueIterator<Key,Value> source) {
      setSource(source);
    }

    @Override
    public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
      return new YieldingIterator(getSource().deepCopy(env));
    }

    @Override
    public boolean hasTop() {
      // report no top while a yield is pending, even if the source still has data
      return (!(yield.isPresent() && yield.orElseThrow().hasYielded()) && super.hasTop());
    }

    @Override
    public void next() throws IOException {
      boolean yielded = false;

      // yield on every other next call.
      yieldNextKey.set(!yieldNextKey.get());
      if (yield.isPresent() && yieldNextKey.get()) {
        yielded = true;
        // since we are not actually skipping keys underneath, simply use the key following the top
        // key as the yield key
        yield.orElseThrow()
            .yield(getTopKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME));
      }

      // if not yielding, then simply pass on the next call
      if (!yielded) {
        super.next();
      }
    }

    @Override
    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
        throws IOException {
      boolean yielded = false;

      // only a re-seek after a yield has an exclusive start key; the initial seek never yields
      if (!range.isStartKeyInclusive()) {
        // yield on every other seek call.
        yieldSeekKey.set(!yieldSeekKey.get());
        if (yield.isPresent() && yieldSeekKey.get()) {
          yielded = true;
          // since we are not actually skipping keys underneath, simply use the key following the
          // range start key
          yield.orElseThrow()
              .yield(range.getStartKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME));
        }
      }

      // if not yielding, then simply pass on the call to the source
      if (!yielded) {
        super.seek(range, columnFamilies, inclusive);
      }
    }

    @Override
    public void enableYielding(YieldCallback<Key> yield) {
      this.yield = Optional.of(yield);
    }
  }
}
| 9,515 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Collection;
import java.util.HashSet;
import java.util.TreeMap;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.CountingIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@code ColumnFamilySkippingIterator}: filtering by a set of column families, both
 * inclusive and exclusive, and seeking near the end of a user-supplied range.
 */
public class ColumnFamilySkippingIteratorTest {
  private static final Collection<ByteSequence> EMPTY_SET = new HashSet<>();

  /** Builds a key from string row/cf/cq and a timestamp. */
  Key newKey(String row, String cf, String cq, long time) {
    return new Key(new Text(row), new Text(cf), new Text(cq), time);
  }

  /** Builds a key from integer row/cf/cq, each zero-padded to six digits. */
  Key newKey(int row, int cf, int cq, long time) {
    return newKey(String.format("%06d", row), String.format("%06d", cf), String.format("%06d", cq),
        time);
  }

  void put(TreeMap<Key,Value> tm, String row, String cf, String cq, long time, Value val) {
    tm.put(newKey(row, cf, cq, time), val);
  }

  void put(TreeMap<Key,Value> tm, String row, String cf, String cq, long time, String val) {
    put(tm, row, cf, cq, time, new Value(val));
  }

  void put(TreeMap<Key,Value> tm, int row, int cf, int cq, long time, int val) {
    tm.put(newKey(row, cf, cq, time), new Value(val + ""));
  }

  /** Asserts the iterator's top entry matches the expectation, then advances the iterator. */
  private void testAndCallNext(ColumnFamilySkippingIterator rdi, String row, String cf, String cq,
      long time, String val) throws Exception {
    assertTrue(rdi.hasTop());
    assertEquals(newKey(row, cf, cq, time), rdi.getTopKey());
    assertEquals(val, rdi.getTopValue().toString());
    rdi.next();
  }

  /** Inclusive and exclusive filtering on a small fixed data set. */
  @Test
  public void test1() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    put(tm1, "r1", "cf1", "cq1", 5, "v1");
    put(tm1, "r1", "cf1", "cq3", 5, "v2");
    put(tm1, "r2", "cf1", "cq1", 5, "v3");
    put(tm1, "r2", "cf2", "cq4", 5, "v4");
    put(tm1, "r2", "cf2", "cq5", 5, "v5");
    put(tm1, "r3", "cf3", "cq6", 5, "v6");
    ColumnFamilySkippingIterator cfi = new ColumnFamilySkippingIterator(new SortedMapIterator(tm1));

    // inclusive seek on an empty family set matches nothing
    cfi.seek(new Range(), EMPTY_SET, true);
    assertFalse(cfi.hasTop());

    // exclusive seek on an empty family set matches everything
    cfi.seek(new Range(), EMPTY_SET, false);
    assertTrue(cfi.hasTop());
    TreeMap<Key,Value> tm2 = new TreeMap<>();
    while (cfi.hasTop()) {
      tm2.put(cfi.getTopKey(), cfi.getTopValue());
      cfi.next();
    }
    assertEquals(tm1, tm2);

    HashSet<ByteSequence> colfams = new HashSet<>();
    colfams.add(new ArrayByteSequence("cf2"));
    cfi.seek(new Range(), colfams, true);
    testAndCallNext(cfi, "r2", "cf2", "cq4", 5, "v4");
    testAndCallNext(cfi, "r2", "cf2", "cq5", 5, "v5");
    assertFalse(cfi.hasTop());

    // requesting a family not in the data ("cf4") has no effect
    colfams.add(new ArrayByteSequence("cf3"));
    colfams.add(new ArrayByteSequence("cf4"));
    cfi.seek(new Range(), colfams, true);
    testAndCallNext(cfi, "r2", "cf2", "cq4", 5, "v4");
    testAndCallNext(cfi, "r2", "cf2", "cq5", 5, "v5");
    testAndCallNext(cfi, "r3", "cf3", "cq6", 5, "v6");
    assertFalse(cfi.hasTop());

    // exclusive: everything except cf2/cf3/cf4, i.e. only cf1
    cfi.seek(new Range(), colfams, false);
    testAndCallNext(cfi, "r1", "cf1", "cq1", 5, "v1");
    testAndCallNext(cfi, "r1", "cf1", "cq3", 5, "v2");
    testAndCallNext(cfi, "r2", "cf1", "cq1", 5, "v3");
    assertFalse(cfi.hasTop());
  }

  /** Exercises filtering against 10 rows x 1000 families x 3 qualifiers. */
  @Test
  public void test2() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    for (int r = 0; r < 10; r++) {
      for (int cf = 0; cf < 1000; cf++) {
        for (int cq = 0; cq < 3; cq++) {
          put(tm1, r, cf, cq, 6, r * cf * cq);
        }
      }
    }

    HashSet<ByteSequence> allColfams = new HashSet<>();
    for (int cf = 0; cf < 1000; cf++) {
      allColfams.add(new ArrayByteSequence(String.format("%06d", cf)));
    }

    ColumnFamilySkippingIterator cfi = new ColumnFamilySkippingIterator(new SortedMapIterator(tm1));
    HashSet<ByteSequence> colfams = new HashSet<>();

    // each requested family that exists contributes 10 rows * 3 qualifiers = 30 entries
    runTest(cfi, 30000, 0, allColfams, colfams);

    colfams.add(new ArrayByteSequence(String.format("%06d", 60)));
    runTest(cfi, 30000, 30, allColfams, colfams);

    colfams.add(new ArrayByteSequence(String.format("%06d", 602)));
    runTest(cfi, 30000, 60, allColfams, colfams);

    colfams.add(new ArrayByteSequence(String.format("%06d", 0)));
    runTest(cfi, 30000, 90, allColfams, colfams);

    colfams.add(new ArrayByteSequence(String.format("%06d", 999)));
    runTest(cfi, 30000, 120, allColfams, colfams);

    colfams.remove(new ArrayByteSequence(String.format("%06d", 0)));
    runTest(cfi, 30000, 90, allColfams, colfams);

    // family 1000 does not exist, so the count is unchanged
    colfams.add(new ArrayByteSequence(String.format("%06d", 1000)));
    runTest(cfi, 30000, 90, allColfams, colfams);

    colfams.remove(new ArrayByteSequence(String.format("%06d", 999)));
    runTest(cfi, 30000, 60, allColfams, colfams);

    colfams.add(new ArrayByteSequence(String.format("%06d", 61)));
    runTest(cfi, 30000, 90, allColfams, colfams);

    for (int i = 62; i < 100; i++) {
      colfams.add(new ArrayByteSequence(String.format("%06d", i)));
    }
    runTest(cfi, 30000, 1230, allColfams, colfams);
  }

  /**
   * Seeks twice, inclusive then exclusive, checking both the entry counts and the exact set of
   * column families observed in each pass.
   */
  private void runTest(ColumnFamilySkippingIterator cfi, int total, int expected,
      HashSet<ByteSequence> allColfams, HashSet<ByteSequence> colfams) throws Exception {
    cfi.seek(new Range(), colfams, true);
    // inclusive pass sees only the requested families that actually exist
    HashSet<ByteSequence> expected1 = new HashSet<>(colfams);
    expected1.retainAll(allColfams);
    runTest(cfi, expected, expected1);

    // exclusive pass sees everything else
    HashSet<ByteSequence> expected2 = new HashSet<>(allColfams);
    expected2.removeAll(colfams);
    cfi.seek(new Range(), colfams, false);
    runTest(cfi, total - expected, expected2);
  }

  /** Drains the iterator, asserting the entry count and the set of families seen. */
  private void runTest(ColumnFamilySkippingIterator cfi, int expected,
      HashSet<ByteSequence> colfams) throws Exception {
    int count = 0;
    HashSet<ByteSequence> ocf = new HashSet<>();
    while (cfi.hasTop()) {
      count++;
      ocf.add(cfi.getTopKey().getColumnFamilyData());
      cfi.next();
    }
    assertEquals(expected, count);
    assertEquals(colfams, ocf);
  }

  /**
   * Constructs a test where ColumnFamilySkippingIterator might try to seek past the end of the
   * user-supplied range while skipping to the next requested family.
   */
  @Test
  public void test3() throws Exception {
    TreeMap<Key,Value> tm1 = new TreeMap<>();
    for (int r = 0; r < 3; r++) {
      for (int cf = 4; cf < 1000; cf++) {
        for (int cq = 0; cq < 1; cq++) {
          put(tm1, r, cf, cq, 6, r * cf * cq);
        }
      }
    }

    CountingIterator ci = new CountingIterator(new SortedMapIterator(tm1));
    ColumnFamilySkippingIterator cfi = new ColumnFamilySkippingIterator(ci);
    HashSet<ByteSequence> colfams = new HashSet<>();
    colfams.add(new ArrayByteSequence(String.format("%06d", 4)));

    // range ends within row 0, before the next occurrence of family 4
    Range range = new Range(newKey(0, 4, 0, 6), true, newKey(0, 400, 0, 6), true);
    cfi.seek(range, colfams, true);
    assertTrue(cfi.hasTop());
    assertEquals(newKey(0, 4, 0, 6), cfi.getTopKey());
    cfi.next();
    assertFalse(cfi.hasTop());

    // family 500 is also outside the range end
    colfams.add(new ArrayByteSequence(String.format("%06d", 500)));
    cfi.seek(range, colfams, true);
    assertTrue(cfi.hasTop());
    assertEquals(newKey(0, 4, 0, 6), cfi.getTopKey());
    cfi.next();
    assertFalse(cfi.hasTop());

    // a wider range reaches family 500 in row 0 and family 4 in row 1
    range = new Range(newKey(0, 4, 0, 6), true, newKey(1, 400, 0, 6), true);
    cfi.seek(range, colfams, true);
    assertTrue(cfi.hasTop());
    assertEquals(newKey(0, 4, 0, 6), cfi.getTopKey());
    cfi.next();
    assertTrue(cfi.hasTop());
    assertEquals(newKey(0, 500, 0, 6), cfi.getTopKey());
    cfi.next();
    assertTrue(cfi.hasTop());
    assertEquals(newKey(1, 4, 0, 6), cfi.getTopKey());
    cfi.next();
    assertFalse(cfi.hasTop());
  }
}
| 9,516 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.TreeMap;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class MultiIteratorTest {
private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();
public static Key newKey(int row, long ts) {
return new Key(newRow(row), ts);
}
public static Range newRange(int row, long ts) {
return new Range(newKey(row, ts), null);
}
public static void newKeyValue(TreeMap<Key,Value> tm, int row, long ts, boolean deleted,
String val) {
Key k = newKey(row, ts);
k.setDeleted(deleted);
tm.put(k, new Value(val));
}
public static Text newRow(int row) {
return new Text(String.format("r%03d", row));
}
  /**
   * Builds a MultiIterator over one SortedMapIterator per input map, seeks it, and asserts the
   * entries come back as v{start}, v{start±1}, ... with the counter ending at {@code end}.
   *
   * @param start first expected counter value (timestamp, or row when {@code incrRow})
   * @param end expected counter value once the iterator is exhausted
   * @param seekKey if non-null, seek to [seekKey, +inf) instead of an unbounded range
   * @param endRow inclusive upper row bound; combined with prevEndRow to constrain the iterator
   * @param prevEndRow exclusive lower row bound
   * @param init whether to pre-seek the sources (and the MultiIterator when a range is used)
   * @param incrRow when true expected keys advance by row (counter increments); otherwise by
   *        timestamp within row 0 (counter decrements)
   * @param maps source data, one map per underlying iterator
   */
  void verify(int start, int end, Key seekKey, Text endRow, Text prevEndRow, boolean init,
      boolean incrRow, List<TreeMap<Key,Value>> maps) throws IOException {
    // one source iterator per input map
    List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<>(maps.size());
    for (TreeMap<Key,Value> map : maps) {
      iters.add(new SortedMapIterator(map));
    }
    MultiIterator mi;
    if (endRow == null && prevEndRow == null) {
      mi = new MultiIterator(iters, init);
    } else {
      // constrain the MultiIterator to the row range (prevEndRow, endRow]
      Range range = new Range(prevEndRow, false, endRow, true);
      if (init) {
        for (SortedKeyValueIterator<Key,Value> iter : iters) {
          iter.seek(range, Set.of(), false);
        }
      }
      mi = new MultiIterator(iters, range);
      if (init) {
        mi.seek(range, Set.of(), false);
      }
    }
    if (seekKey != null) {
      mi.seek(new Range(seekKey, null), EMPTY_COL_FAMS, false);
    } else {
      mi.seek(new Range(), EMPTY_COL_FAMS, false);
    }
    int i = start;
    while (mi.hasTop()) {
      if (incrRow) {
        // expected keys walk forward by row, all at timestamp 0
        assertEquals(newKey(i, 0), mi.getTopKey());
      } else {
        // expected keys walk backward by timestamp within row 0
        assertEquals(newKey(0, i), mi.getTopKey());
      }
      assertEquals("v" + i, mi.getTopValue().toString());
      mi.next();
      if (incrRow) {
        i++;
      } else {
        i--;
      }
    }
    // include all parameters in the message so a failing combination is identifiable
    assertEquals(end, i,
        "start=" + start + " end=" + end + " seekKey=" + seekKey + " endRow=" + endRow
            + " prevEndRow=" + prevEndRow + " init=" + init + " incrRow=" + incrRow + " maps="
            + maps);
  }
void verify(int start, Key seekKey, List<TreeMap<Key,Value>> maps) throws IOException {
if (seekKey != null) {
verify(start, -1, seekKey, null, null, false, false, maps);
}
verify(start, -1, seekKey, null, null, true, false, maps);
}
@Test
public void test1() throws IOException {
// TEST non overlapping inputs
TreeMap<Key,Value> tm1 = new TreeMap<>();
List<TreeMap<Key,Value>> tmpList = new ArrayList<>(2);
for (int i = 0; i < 4; i++) {
newKeyValue(tm1, 0, i, false, "v" + i);
}
tmpList.add(tm1);
tm1 = new TreeMap<>();
for (int i = 4; i < 8; i++) {
newKeyValue(tm1, 0, i, false, "v" + i);
}
tmpList.add(tm1);
for (int seek = -1; seek < 8; seek++) {
if (seek == 7) {
verify(seek, null, tmpList);
}
verify(seek, newKey(0, seek), tmpList);
}
}
@Test
public void test2() throws IOException {
// TEST overlapping inputs
TreeMap<Key,Value> tm1 = new TreeMap<>();
TreeMap<Key,Value> tm2 = new TreeMap<>();
List<TreeMap<Key,Value>> tmpList = new ArrayList<>(2);
for (int i = 0; i < 8; i++) {
if (i % 2 == 0) {
newKeyValue(tm1, 0, i, false, "v" + i);
} else {
newKeyValue(tm2, 0, i, false, "v" + i);
}
}
tmpList.add(tm1);
tmpList.add(tm2);
for (int seek = -1; seek < 8; seek++) {
if (seek == 7) {
verify(seek, null, tmpList);
}
verify(seek, newKey(0, seek), tmpList);
}
}
@Test
public void test3() throws IOException {
// TEST single input
TreeMap<Key,Value> tm1 = new TreeMap<>();
List<TreeMap<Key,Value>> tmpList = new ArrayList<>(2);
for (int i = 0; i < 8; i++) {
newKeyValue(tm1, 0, i, false, "v" + i);
}
tmpList.add(tm1);
for (int seek = -1; seek < 8; seek++) {
if (seek == 7) {
verify(seek, null, tmpList);
}
verify(seek, newKey(0, seek), tmpList);
}
}
@Test
public void test4() throws IOException {
// TEST empty input
TreeMap<Key,Value> tm1 = new TreeMap<>();
List<SortedKeyValueIterator<Key,Value>> skvil = new ArrayList<>(1);
skvil.add(new SortedMapIterator(tm1));
MultiIterator mi = new MultiIterator(skvil, true);
assertFalse(mi.hasTop());
mi.seek(newRange(0, 6), EMPTY_COL_FAMS, false);
assertFalse(mi.hasTop());
}
@Test
public void test5() throws IOException {
// TEST overlapping inputs AND prevRow AND endRow AND seek
TreeMap<Key,Value> tm1 = new TreeMap<>();
TreeMap<Key,Value> tm2 = new TreeMap<>();
List<TreeMap<Key,Value>> tmpList = new ArrayList<>(2);
for (int i = 0; i < 8; i++) {
if (i % 2 == 0) {
newKeyValue(tm1, i, 0, false, "v" + i);
} else {
newKeyValue(tm2, i, 0, false, "v" + i);
}
}
tmpList.add(tm1);
tmpList.add(tm2);
for (int seek = -1; seek < 9; seek++) {
verify(Math.max(0, seek), 8, newKey(seek, 0), null, null, true, true, tmpList);
verify(Math.max(0, seek), 8, newKey(seek, 0), null, null, false, true, tmpList);
for (int er = seek; er < 10; er++) {
int end = seek > er ? seek : Math.min(er + 1, 8);
int noSeekEnd = Math.min(er + 1, 8);
if (er < 0) {
noSeekEnd = 0;
}
verify(0, noSeekEnd, null, newRow(er), null, true, true, tmpList);
verify(Math.max(0, seek), end, newKey(seek, 0), newRow(er), null, true, true, tmpList);
verify(Math.max(0, seek), end, newKey(seek, 0), newRow(er), null, false, true, tmpList);
for (int per = -1; per < er; per++) {
int start = Math.max(per + 1, seek);
if (start > er) {
end = start;
}
if (per >= 8) {
end = start;
}
int noSeekStart = Math.max(0, per + 1);
if (er < 0 || per >= 7) {
noSeekEnd = noSeekStart;
}
verify(noSeekStart, noSeekEnd, null, newRow(er), newRow(per), true, true, tmpList);
verify(Math.max(0, start), end, newKey(seek, 0), newRow(er), newRow(per), true, true,
tmpList);
verify(Math.max(0, start), end, newKey(seek, 0), newRow(er), newRow(per), false, true,
tmpList);
}
}
}
}
@Test
public void test6() throws IOException {
// TEst setting an endKey
TreeMap<Key,Value> tm1 = new TreeMap<>();
newKeyValue(tm1, 3, 0, false, "1");
newKeyValue(tm1, 4, 0, false, "2");
newKeyValue(tm1, 6, 0, false, "3");
List<SortedKeyValueIterator<Key,Value>> skvil = new ArrayList<>(1);
skvil.add(new SortedMapIterator(tm1));
MultiIterator mi = new MultiIterator(skvil, true);
mi.seek(new Range(null, true, newKey(5, 9), false), EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(3, 0));
assertEquals("1", mi.getTopValue().toString());
mi.next();
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(4, 0));
assertEquals("2", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
mi.seek(new Range(newKey(4, 10), true, newKey(5, 9), false), EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(4, 0));
assertEquals("2", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
mi.seek(new Range(newKey(4, 10), true, newKey(6, 0), false), EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(4, 0));
assertEquals("2", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
mi.seek(new Range(newKey(4, 10), true, newKey(6, 0), true), EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(4, 0));
assertEquals("2", mi.getTopValue().toString());
mi.next();
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(6, 0));
assertEquals("3", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
mi.seek(new Range(newKey(4, 0), true, newKey(6, 0), false), EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(4, 0));
assertEquals("2", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
mi.seek(new Range(newKey(4, 0), false, newKey(6, 0), false), EMPTY_COL_FAMS, false);
assertFalse(mi.hasTop());
mi.seek(new Range(newKey(4, 0), false, newKey(6, 0), true), EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals(mi.getTopKey(), newKey(6, 0));
assertEquals("3", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
}
@Test
public void test7() throws IOException {
// TEst setting an endKey
TreeMap<Key,Value> tm1 = new TreeMap<>();
newKeyValue(tm1, 0, 3, false, "1");
newKeyValue(tm1, 0, 2, false, "2");
newKeyValue(tm1, 0, 1, false, "3");
newKeyValue(tm1, 0, 0, false, "4");
newKeyValue(tm1, 1, 2, false, "5");
newKeyValue(tm1, 1, 1, false, "6");
newKeyValue(tm1, 1, 0, false, "7");
newKeyValue(tm1, 2, 1, false, "8");
newKeyValue(tm1, 2, 0, false, "9");
List<SortedKeyValueIterator<Key,Value>> skvil = new ArrayList<>(1);
skvil.add(new SortedMapIterator(tm1));
KeyExtent extent = new KeyExtent(TableId.of("tablename"), newRow(1), newRow(0));
MultiIterator mi = new MultiIterator(skvil, extent);
Range r1 = new Range((Text) null, (Text) null);
mi.seek(r1, EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals("5", mi.getTopValue().toString());
mi.next();
assertTrue(mi.hasTop());
assertEquals("6", mi.getTopValue().toString());
mi.next();
assertTrue(mi.hasTop());
assertEquals("7", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
Range r2 = new Range(newKey(0, 0), true, newKey(1, 1), true);
mi.seek(r2, EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals("5", mi.getTopValue().toString());
mi.next();
assertTrue(mi.hasTop());
assertEquals("6", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
Range r3 = new Range(newKey(0, 0), false, newKey(1, 1), false);
mi.seek(r3, EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals("5", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
Range r4 = new Range(newKey(1, 2), true, newKey(1, 1), false);
mi.seek(r4, EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals("5", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
Range r5 = new Range(newKey(1, 2), false, newKey(1, 1), true);
mi.seek(r5, EMPTY_COL_FAMS, false);
assertTrue(mi.hasTop());
assertEquals("6", mi.getTopValue().toString());
mi.next();
assertFalse(mi.hasTop());
Range r6 = new Range(newKey(2, 1), true, newKey(2, 0), true);
mi.seek(r6, EMPTY_COL_FAMS, false);
assertFalse(mi.hasTop());
Range r7 = new Range(newKey(0, 3), true, newKey(0, 1), true);
mi.seek(r7, EMPTY_COL_FAMS, false);
assertFalse(mi.hasTop());
}
}
| 9,517 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/system/ColumnFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Collections;
import java.util.HashSet;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnQualifierFilter;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class ColumnFilterTest {

  /** Builds a key from row, column family, and column qualifier strings. */
  Key newKey(String row, String cf, String cq) {
    return new Key(new Text(row), new Text(cf), new Text(cq));
  }

  /** Builds a column matching an entire column family. */
  Column newColumn(String cf) {
    // Use an explicit charset; the no-arg String.getBytes() depends on the platform default
    // charset (prior to JDK 18), which could make the test environment-sensitive.
    return new Column(cf.getBytes(java.nio.charset.StandardCharsets.UTF_8), null, null);
  }

  /** Builds a column matching a specific family/qualifier pair. */
  Column newColumn(String cf, String cq) {
    return new Column(cf.getBytes(java.nio.charset.StandardCharsets.UTF_8),
        cq.getBytes(java.nio.charset.StandardCharsets.UTF_8), null);
  }

  /**
   * When all fetched columns are family-only, no qualifier filtering is needed, so wrap() should
   * return the source iterator itself rather than a wrapping filter.
   */
  @Test
  public void test1() {
    TreeMap<Key,Value> data = new TreeMap<>();

    data.put(newKey("r1", "cf1", "cq1"), new Value(""));
    data.put(newKey("r1", "cf2", "cq1"), new Value(""));

    HashSet<Column> columns = new HashSet<>();
    columns.add(newColumn("cf1"));

    SortedMapIterator smi = new SortedMapIterator(data);

    SortedKeyValueIterator<Key,Value> cf = ColumnQualifierFilter.wrap(smi, columns);

    assertSame(smi, cf);
  }

  /**
   * A mix of a family-only column and a family/qualifier column: cf1 entries pass regardless of
   * qualifier, while cf2 entries pass only for the named qualifier.
   */
  @Test
  public void test2() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();

    data.put(newKey("r1", "cf1", "cq1"), new Value(""));
    data.put(newKey("r1", "cf2", "cq1"), new Value(""));
    data.put(newKey("r1", "cf2", "cq2"), new Value(""));

    HashSet<Column> columns = new HashSet<>();
    columns.add(newColumn("cf1"));
    columns.add(newColumn("cf2", "cq1"));

    SortedKeyValueIterator<Key,Value> cf =
        ColumnQualifierFilter.wrap(new SortedMapIterator(data), columns);

    cf.seek(new Range(), Collections.emptySet(), false);
    assertTrue(cf.hasTop());
    assertEquals(newKey("r1", "cf1", "cq1"), cf.getTopKey());
    cf.next();
    assertTrue(cf.hasTop());
    assertEquals(newKey("r1", "cf2", "cq1"), cf.getTopKey());
    cf.next();
    assertFalse(cf.hasTop());
  }

  /**
   * A single family/qualifier column: only the exact cf2/cq1 entry passes; cf1 and cf2/cq2 are
   * filtered out.
   */
  @Test
  public void test3() throws Exception {
    TreeMap<Key,Value> data = new TreeMap<>();

    data.put(newKey("r1", "cf1", "cq1"), new Value(""));
    data.put(newKey("r1", "cf2", "cq1"), new Value(""));
    data.put(newKey("r1", "cf2", "cq2"), new Value(""));

    HashSet<Column> columns = new HashSet<>();
    columns.add(newColumn("cf2", "cq1"));

    SortedKeyValueIterator<Key,Value> cf =
        ColumnQualifierFilter.wrap(new SortedMapIterator(data), columns);

    cf.seek(new Range(), Collections.emptySet(), false);
    assertTrue(cf.hasTop());
    assertEquals(newKey("r1", "cf2", "cq1"), cf.getTopKey());
    cf.next();
    assertFalse(cf.hasTop());
  }
}
| 9,518 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/iterators/system/VisibilityFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iterators.system;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.HashSet;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
import org.apache.accumulo.core.iteratorsImpl.system.VisibilityFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.junit.jupiter.api.Test;
public class VisibilityFilterTest {

  /**
   * An entry whose visibility expression is malformed ("A&amp;") must be suppressed by the filter
   * rather than surfacing to the scan, even when the scanner holds the "A" authorization.
   */
  @Test
  public void testBadVisibility() throws IOException {
    TreeMap<Key,Value> data = new TreeMap<>();
    data.put(new Key("r1", "cf1", "cq1", "A&"), new Value());

    SortedKeyValueIterator<Key,Value> visFilter =
        VisibilityFilter.wrap(new SortedMapIterator(data), new Authorizations("A"), "".getBytes());
    visFilter.seek(new Range(), new HashSet<>(), false);

    assertFalse(visFilter.hasTop());
  }

  /**
   * With no authorizations, entries carrying an empty visibility are returned while entries
   * labeled with any visibility ("C") are filtered out.
   */
  @Test
  public void testEmptyAuths() throws IOException {
    TreeMap<Key,Value> data = new TreeMap<>();
    data.put(new Key("r1", "cf1", "cq1", ""), new Value());
    data.put(new Key("r1", "cf1", "cq2", "C"), new Value());
    data.put(new Key("r1", "cf1", "cq3", ""), new Value());

    SortedKeyValueIterator<Key,Value> visFilter =
        VisibilityFilter.wrap(new SortedMapIterator(data), Authorizations.EMPTY, "".getBytes());
    visFilter.seek(new Range(), new HashSet<>(), false);

    // first unlabeled entry passes
    assertTrue(visFilter.hasTop());
    assertEquals(new Key("r1", "cf1", "cq1", ""), visFilter.getTopKey());
    visFilter.next();

    // labeled cq2 entry is skipped; second unlabeled entry passes
    assertTrue(visFilter.hasTop());
    assertEquals(new Key("r1", "cf1", "cq3", ""), visFilter.getTopKey());
    visFilter.next();

    assertFalse(visFilter.hasTop());
  }
}
| 9,519 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core;
import static java.nio.charset.StandardCharsets.UTF_8;
public class Constants {

  // defines Accumulo data version constants
  public static final String VERSION = FilteredConstants.VERSION;
  public static final String VERSION_DIR = "version";

  public static final String APPNAME = "org.apache.accumulo";

  // important directories
  public static final String INSTANCE_ID_DIR = "instance_id";
  public static final String TABLE_DIR = "tables";
  public static final String RECOVERY_DIR = "recovery";
  public static final String WAL_DIR = "wal";

  // Zookeeper locations
  public static final String ZROOT = "/accumulo";
  public static final String ZINSTANCES = "/instances";

  public static final String ZUSERS = "/users";

  // table znodes; the ZTABLE_* paths are relative suffixes (note leading "/")
  public static final String ZTABLES = "/tables";
  public static final byte[] ZTABLES_INITIAL_ID = {'0'};
  public static final String ZTABLE_NAME = "/name";
  public static final String ZTABLE_DELETE_MARKER = "/deleting";
  public static final String ZTABLE_STATE = "/state";
  public static final String ZTABLE_FLUSH_ID = "/flush-id";
  public static final String ZTABLE_COMPACT_ID = "/compact-id";
  public static final String ZTABLE_COMPACT_CANCEL_ID = "/compact-cancel-id";
  public static final String ZTABLE_NAMESPACE = "/namespace";

  // namespace znodes
  public static final String ZNAMESPACES = "/namespaces";
  public static final String ZNAMESPACE_NAME = "/name";

  // manager znodes
  public static final String ZMANAGERS = "/managers";
  public static final String ZMANAGER_LOCK = ZMANAGERS + "/lock";
  public static final String ZMANAGER_GOAL_STATE = ZMANAGERS + "/goal_state";
  public static final String ZMANAGER_TICK = ZMANAGERS + "/tick";

  // garbage collector znodes
  public static final String ZGC = "/gc";
  public static final String ZGC_LOCK = ZGC + "/lock";

  // monitor znodes
  public static final String ZMONITOR = "/monitor";
  public static final String ZMONITOR_LOCK = ZMONITOR + "/lock";
  public static final String ZMONITOR_HTTP_ADDR = ZMONITOR + "/http_addr";

  // used by < 2.1 table and namespace configurations
  public static final String ZCONF_LEGACY = "/conf";
  public static final String ZCONFIG = "/config";

  // server registration znodes
  public static final String ZTSERVERS = "/tservers";

  public static final String ZSSERVERS = "/sservers";

  public static final String ZCOMPACTORS = "/compactors";

  public static final String ZCOORDINATOR = "/coordinators";
  public static final String ZCOORDINATOR_LOCK = ZCOORDINATOR + "/lock";

  // dead-server tracking znodes
  public static final String ZDEAD = "/dead";
  public static final String ZDEADTSERVERS = ZDEAD + "/tservers";

  public static final String ZTRACERS = "/tracers";

  public static final String ZPROBLEMS = "/problems";

  public static final String BULK_ARBITRATOR_TYPE = "bulkTx";

  // FATE and coordination znodes
  public static final String ZFATE = "/fate";

  public static final String ZNEXT_FILE = "/next_file";

  public static final String ZBULK_FAILED_COPYQ = "/bulk_failed_copyq";

  public static final String ZHDFS_RESERVATIONS = "/hdfs_reservations";
  public static final String ZRECOVERY = "/recovery";

  /**
   * Base znode for storing secret keys that back delegation tokens
   */
  public static final String ZDELEGATION_TOKEN_KEYS = "/delegation_token_keys";

  public static final String ZTABLE_LOCKS = "/table_locks";

  // file/directory name prefixes and bulk-import metadata file names
  public static final String BULK_PREFIX = "b-";
  public static final String BULK_RENAME_FILE = "renames.json";
  public static final String BULK_LOAD_MAPPING = "loadmap.json";

  public static final String CLONE_PREFIX = "c-";
  public static final byte[] CLONE_PREFIX_BYTES = CLONE_PREFIX.getBytes(UTF_8);

  // this affects the table client caching of metadata
  public static final int SCAN_BATCH_SIZE = 1000;

  // Scanners will default to fetching 3 batches of Key/Value pairs before asynchronously
  // fetching the next batch.
  public static final long SCANNER_DEFAULT_READAHEAD_THRESHOLD = 3L;

  public static final int MAX_DATA_TO_PRINT = 64;
  public static final String CORE_PACKAGE_NAME = "org.apache.accumulo.core";

  public static final String GENERATED_TABLET_DIRECTORY_PREFIX = "t-";

  // import/export file names
  public static final String EXPORT_METADATA_FILE = "metadata.bin";
  public static final String EXPORT_TABLE_CONFIG_FILE = "table_config.txt";
  public static final String EXPORT_FILE = "exportMetadata.zip";
  public static final String EXPORT_INFO_FILE = "accumulo_export_info.txt";
  public static final String IMPORT_MAPPINGS_FILE = "mappings.txt";

  public static final String HDFS_TABLES_DIR = "/tables";

  public static final int DEFAULT_VISIBILITY_CACHE_SIZE = 1000;
}
| 9,520 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/metrics/MetricsProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.metrics;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.micrometer.core.instrument.MeterRegistry;
/**
* Prior to 2.1.0 Accumulo used the <a href=
* "https://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/package-summary.html">Hadoop
* Metrics2</a> framework. In 2.1.0 Accumulo migrated away from the Metrics2 framework to
* <a href="https://micrometer.io/">Micrometer</a>. Micrometer suggests using a particular
* <a href="https://micrometer.io/docs/concepts#_naming_meters">naming convention</a> for the
* metrics. The table below contains a mapping of the old to new metric names.
* <table border="1">
* <caption>Summary of Metric Changes</caption> <!-- fate -->
* <tr>
* <th>Old Name</th>
* <th>Hadoop Metrics2 Type</th>
* <th>New Name</th>
* <th>Micrometer Type</th>
* <th>Notes</th>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_LOW_MEMORY}</td>
* <td>Guage</td>
* <td>reports 1 when process memory usage is above threshold, 0 when memory is okay</td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_COMPACTOR_MAJC_STUCK}</td>
* <td>LongTaskTimer</td>
* <td></td>
* </tr>
* <tr>
* <td>currentFateOps</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TOTAL_IN_PROGRESS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>FateTxOpType_{name}</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TYPE_IN_PROGRESS}</td>
* <td>Gauge</td>
* <td>Previously there was a metric per operation type with the count of in-progress transactions
* of that type. Now there is one metric and the type is in the tag op.type</td>
* </tr>
* <tr>
* <td>totalFateOps</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_OPS_ACTIVITY}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>totalZkConnErrors</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_ERRORS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>FateTxState_NEW</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TX}</td>
* <td>Gauge</td>
* <td>The state is now in a tag: state=new</td>
* </tr>
* <tr>
* <td>FateTxState_IN_PROGRESS</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TX}</td>
* <td>Gauge</td>
* <td>The state is now in a tag: state=in.progress</td>
* </tr>
* <tr>
* <td>FateTxState_FAILED_IN_PROGRESS</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TX}</td>
* <td>Gauge</td>
* <td>The state is now in a tag: state=failed.in.progress</td>
* </tr>
* <tr>
* <td>FateTxState_FAILED</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TX}</td>
* <td>Gauge</td>
* <td>The state is now in a tag: state=failed</td>
* </tr>
* <tr>
* <td>FateTxState_SUCCESSFUL</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TX}</td>
* <td>Gauge</td>
* <td>The state is now in a tag: state=successful</td>
* </tr>
* <tr>
* <td>FateTxState_UNKNOWN</td>
* <td>Gauge</td>
* <td>{@link #METRICS_FATE_TX}</td>
* <td>Gauge</td>
* <td>The state is now in a tag: state=unknown</td>
* </tr>
* <!-- garbage collection -->
* <tr>
* <td>AccGcStarted</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_STARTED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcFinished</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_FINISHED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcCandidates</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_CANDIDATES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcInUse</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_IN_USE}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcDeleted</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_DELETED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcErrors</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_ERRORS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcWalStarted</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_WAL_STARTED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcWalFinished</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_WAL_FINISHED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcWalCandidates</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_WAL_CANDIDATES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcWalInUse</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_WAL_IN_USE}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcWalDeleted</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_WAL_DELETED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcWalErrors</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_WAL_ERRORS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcPosOpDuration</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_POST_OP_DURATION}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>AccGcRunCycleCount</td>
* <td>Gauge</td>
* <td>{@link #METRICS_GC_RUN_CYCLE}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <!-- tablet server -->
* <tr>
* <td>entries</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_ENTRIES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>entriesInMem</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_MEM_ENTRIES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>activeMajCs</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_MAJC_RUNNING}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_TSERVER_MAJC_STUCK}</td>
* <td>LongTaskTimer</td>
* <td></td>
* </tr>
* <tr>
* <td>queuedMajCs</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_MAJC_QUEUED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>activeMinCs</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_MINC_RUNNING}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>queuedMinCs</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_MINC_QUEUED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>totalMinCs</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_MINC_TOTAL}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>onlineTablets</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_TABLETS_ONLINE}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_TSERVER_TABLETS_LONG_ASSIGNMENTS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>openingTablets</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_TABLETS_OPENING}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>unopenedTablets</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_TABLETS_UNOPENED}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>filesPerTablet</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_TABLETS_FILES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>queries</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_QUERIES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>scannedRate</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_SCANNED_ENTRIES}</td>
* <td>Gauge</td>
* <td>Prior to 2.1.0 this metric was reported as a rate, it is now the count and the rate can be
* derived</td>
* </tr>
* <tr>
* <td>queryRate</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_SCAN_RESULTS}</td>
* <td>Gauge</td>
* <td>Prior to 2.1.0 this metric was reported as a rate, it is now the count and the rate can be
* derived</td>
* </tr>
* <tr>
* <td>queryByteRate</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_SCAN_RESULTS_BYTES}</td>
* <td>Gauge</td>
* <td>Prior to 2.1.0 this metric was reported as a rate, it is now the count and the rate can be
* derived</td>
* </tr>
* <tr>
* <td>ingestRate</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_INGEST_MUTATIONS}</td>
* <td>Gauge</td>
* <td>Prior to 2.1.0 this metric was reported as a rate, it is now the count and the rate can be
* derived</td>
* </tr>
* <tr>
* <td>ingestByteRate</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_INGEST_BYTES}</td>
* <td>Gauge</td>
* <td>Prior to 2.1.0 this metric was reported as a rate, it is now the count and the rate can be
* derived</td>
* </tr>
* <tr>
* <td>holdTime</td>
* <td>Gauge</td>
* <td>{@link #METRICS_TSERVER_HOLD}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <!-- scans -->
* <tr>
* <td>scan</td>
* <td>Stat</td>
* <td>{@link #METRICS_SCAN_TIMES}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_OPEN_FILES}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>result</td>
* <td>Stat</td>
* <td>{@link #METRICS_SCAN_RESULTS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>yield</td>
* <td>Stat</td>
* <td>{@link #METRICS_SCAN_YIELDS}</td>
* <td>Gauge</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_START}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_CONTINUE}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_CLOSE}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_BUSY_TIMEOUT}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_PAUSED_FOR_MEM}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_SCAN_RETURN_FOR_MEM}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <!-- major compactions -->
* <tr>
* <td>{i|e}_{compactionServiceName}_{executor_name}_queued</td>
* <td>Gauge</td>
* <td>{@link #METRICS_MAJC_QUEUED}</td>
* <td>Gauge</td>
* <td>The compaction service information is in a tag:
* id={i|e}_{compactionServiceName}_{executor_name}</td>
* </tr>
* <tr>
* <td>{i|e}_{compactionServiceName}_{executor_name}_running</td>
* <td>Gauge</td>
* <td>{@link #METRICS_MAJC_RUNNING}</td>
* <td>Gauge</td>
* <td>The compaction service information is in a tag:
* id={i|e}_{compactionServiceName}_{executor_name}</td>
* </tr>
* <tr>
* <td></td>
* <td></td>
* <td>{@link #METRICS_MAJC_PAUSED}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <!-- minor compactions -->
* <tr>
* <td>Queue</td>
* <td>Stat</td>
* <td>{@link #METRICS_MINC_QUEUED}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td>Minc</td>
* <td>Stat</td>
* <td>{@link #METRICS_MINC_RUNNING}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td></td>
* <td></td>
* <td>{@link #METRICS_MINC_PAUSED}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <!-- Updates (ingest) -->
* <tr>
* <td>permissionErrors</td>
* <td>Counter</td>
* <td>{@link #METRICS_UPDATE_ERRORS}</td>
* <td>Gauge</td>
* <td>Type is stored in tag: type=permission</td>
* </tr>
* <tr>
* <td>unknownTabletErrors</td>
* <td>Counter</td>
* <td>{@link #METRICS_UPDATE_ERRORS}</td>
* <td>Gauge</td>
* <td>Type is stored in tag: type=unknown.tablet</td>
* </tr>
* <tr>
* <td>constraintViolations</td>
* <td>Counter</td>
* <td>{@link #METRICS_UPDATE_ERRORS}</td>
* <td>Gauge</td>
* <td>Type is stored in tag: type=constraint.violation</td>
* </tr>
* <tr>
* <td>commitPrep</td>
* <td>Stat</td>
* <td>{@link #METRICS_UPDATE_COMMIT_PREP}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td>commitTime</td>
* <td>Stat</td>
* <td>{@link #METRICS_UPDATE_COMMIT}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td>waLogWriteTime</td>
* <td>Stat</td>
* <td>{@link #METRICS_UPDATE_WALOG_WRITE}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td>mutationArraysSize</td>
* <td>Stat</td>
* <td>{@link #METRICS_UPDATE_MUTATION_ARRAY_SIZE}</td>
* <td>Distribution Summary</td>
* <td></td>
* </tr>
* <!-- Thrift -->
* <tr>
* <td>idle</td>
* <td>Stat</td>
* <td>{@link #METRICS_THRIFT_IDLE}</td>
* <td>Distribution Summary</td>
* <td></td>
* </tr>
* <tr>
* <td>execute</td>
* <td>Stat</td>
* <td>{@link #METRICS_THRIFT_EXECUTE}</td>
* <td>Distribution Summary</td>
* <td></td>
* </tr>
* <!-- ZooKeeper property cache -->
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_PROPSTORE_LOAD_TIMER}</td>
* <td>Timer</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_PROPSTORE_REFRESH_COUNT}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_PROPSTORE_REFRESH_LOAD_COUNT}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_PROPSTORE_EVICTION_COUNT}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* <tr>
* <td>N/A</td>
* <td>N/A</td>
* <td>{@link #METRICS_PROPSTORE_ZK_ERROR_COUNT}</td>
* <td>Counter</td>
* <td></td>
* </tr>
* </table>
*
* @since 2.1.0
*/
public interface MetricsProducer {

  Logger LOG = LoggerFactory.getLogger(MetricsProducer.class);

  // General
  String METRICS_LOW_MEMORY = "accumulo.detected.low.memory";

  // Compactor
  String METRICS_COMPACTOR_PREFIX = "accumulo.compactor.";
  String METRICS_COMPACTOR_MAJC_STUCK = METRICS_COMPACTOR_PREFIX + "majc.stuck";

  // FATE
  String METRICS_FATE_PREFIX = "accumulo.fate.";
  String METRICS_FATE_TYPE_IN_PROGRESS = METRICS_FATE_PREFIX + "ops.in.progress.by.type";
  String METRICS_FATE_TOTAL_IN_PROGRESS = METRICS_FATE_PREFIX + "ops.in.progress";
  String METRICS_FATE_OPS_ACTIVITY = METRICS_FATE_PREFIX + "ops.activity";
  String METRICS_FATE_ERRORS = METRICS_FATE_PREFIX + "errors";
  String METRICS_FATE_TX = METRICS_FATE_PREFIX + "tx";

  // Garbage collection
  String METRICS_GC_PREFIX = "accumulo.gc.";
  String METRICS_GC_STARTED = METRICS_GC_PREFIX + "started";
  String METRICS_GC_FINISHED = METRICS_GC_PREFIX + "finished";
  String METRICS_GC_CANDIDATES = METRICS_GC_PREFIX + "candidates";
  String METRICS_GC_IN_USE = METRICS_GC_PREFIX + "in.use";
  String METRICS_GC_DELETED = METRICS_GC_PREFIX + "deleted";
  String METRICS_GC_ERRORS = METRICS_GC_PREFIX + "errors";
  String METRICS_GC_WAL_STARTED = METRICS_GC_PREFIX + "wal.started";
  String METRICS_GC_WAL_FINISHED = METRICS_GC_PREFIX + "wal.finished";
  String METRICS_GC_WAL_CANDIDATES = METRICS_GC_PREFIX + "wal.candidates";
  String METRICS_GC_WAL_IN_USE = METRICS_GC_PREFIX + "wal.in.use";
  String METRICS_GC_WAL_DELETED = METRICS_GC_PREFIX + "wal.deleted";
  String METRICS_GC_WAL_ERRORS = METRICS_GC_PREFIX + "wal.errors";
  String METRICS_GC_POST_OP_DURATION = METRICS_GC_PREFIX + "post.op.duration";
  String METRICS_GC_RUN_CYCLE = METRICS_GC_PREFIX + "run.cycle";

  // Major compactions
  String METRICS_MAJC_PREFIX = "accumulo.tserver.compactions.majc.";
  String METRICS_MAJC_QUEUED = METRICS_MAJC_PREFIX + "queued";
  String METRICS_MAJC_RUNNING = METRICS_MAJC_PREFIX + "running";
  String METRICS_MAJC_PAUSED = METRICS_MAJC_PREFIX + "paused";

  // Minor compactions
  String METRICS_MINC_PREFIX = "accumulo.tserver.compactions.minc.";
  String METRICS_MINC_QUEUED = METRICS_MINC_PREFIX + "queued";
  String METRICS_MINC_RUNNING = METRICS_MINC_PREFIX + "running";
  String METRICS_MINC_PAUSED = METRICS_MINC_PREFIX + "paused";

  // Scans
  String METRICS_SCAN_PREFIX = "accumulo.tserver.scans.";
  String METRICS_SCAN_TIMES = METRICS_SCAN_PREFIX + "times";
  String METRICS_SCAN_OPEN_FILES = METRICS_SCAN_PREFIX + "files.open";
  String METRICS_SCAN_RESULTS = METRICS_SCAN_PREFIX + "result";
  String METRICS_SCAN_YIELDS = METRICS_SCAN_PREFIX + "yields";
  String METRICS_SCAN_START = METRICS_SCAN_PREFIX + "start";
  String METRICS_SCAN_CONTINUE = METRICS_SCAN_PREFIX + "continue";
  String METRICS_SCAN_CLOSE = METRICS_SCAN_PREFIX + "close";
  String METRICS_SCAN_BUSY_TIMEOUT = METRICS_SCAN_PREFIX + "busy.timeout";
  // Fixed: the suffix previously began with a '.', but METRICS_SCAN_PREFIX already ends with
  // one, which produced the malformed names "accumulo.tserver.scans..paused.for.memory" and
  // "accumulo.tserver.scans..return.early.for.memory" (inconsistent with every other constant).
  String METRICS_SCAN_PAUSED_FOR_MEM = METRICS_SCAN_PREFIX + "paused.for.memory";
  String METRICS_SCAN_RETURN_FOR_MEM = METRICS_SCAN_PREFIX + "return.early.for.memory";

  // TabletServer
  String METRICS_TSERVER_PREFIX = "accumulo.tserver.";
  String METRICS_TSERVER_ENTRIES = METRICS_TSERVER_PREFIX + "entries";
  String METRICS_TSERVER_MEM_ENTRIES = METRICS_TSERVER_PREFIX + "entries.mem";
  String METRICS_TSERVER_MAJC_QUEUED = METRICS_TSERVER_PREFIX + "majc.queued";
  String METRICS_TSERVER_MAJC_RUNNING = METRICS_TSERVER_PREFIX + "majc.running";
  String METRICS_TSERVER_MAJC_STUCK = METRICS_TSERVER_PREFIX + "majc.stuck";
  String METRICS_TSERVER_MINC_QUEUED = METRICS_TSERVER_PREFIX + "minc.queued";
  String METRICS_TSERVER_MINC_RUNNING = METRICS_TSERVER_PREFIX + "minc.running";
  String METRICS_TSERVER_MINC_TOTAL = METRICS_TSERVER_PREFIX + "minc.total";
  String METRICS_TSERVER_TABLETS_LONG_ASSIGNMENTS =
      METRICS_TSERVER_PREFIX + "tablets.assignments.warning";
  String METRICS_TSERVER_TABLETS_ONLINE = METRICS_TSERVER_PREFIX + "tablets.online";
  String METRICS_TSERVER_TABLETS_OPENING = METRICS_TSERVER_PREFIX + "tablets.opening";
  String METRICS_TSERVER_TABLETS_UNOPENED = METRICS_TSERVER_PREFIX + "tablets.unopened";
  String METRICS_TSERVER_QUERIES = METRICS_TSERVER_PREFIX + "queries";
  String METRICS_TSERVER_TABLETS_FILES = METRICS_TSERVER_PREFIX + "tablets.files";
  String METRICS_TSERVER_HOLD = METRICS_TSERVER_PREFIX + "hold";
  String METRICS_TSERVER_INGEST_MUTATIONS = METRICS_TSERVER_PREFIX + "ingest.mutations";
  String METRICS_TSERVER_INGEST_BYTES = METRICS_TSERVER_PREFIX + "ingest.bytes";
  String METRICS_TSERVER_SCAN_RESULTS = METRICS_TSERVER_PREFIX + "scan.results";
  String METRICS_TSERVER_SCAN_RESULTS_BYTES = METRICS_TSERVER_PREFIX + "scan.results.bytes";
  String METRICS_TSERVER_SCANNED_ENTRIES = METRICS_TSERVER_PREFIX + "scan.scanned.entries";

  // Thrift
  String METRICS_THRIFT_PREFIX = "accumulo.thrift.";
  String METRICS_THRIFT_EXECUTE = METRICS_THRIFT_PREFIX + "execute";
  String METRICS_THRIFT_IDLE = METRICS_THRIFT_PREFIX + "idle";

  // Updates
  String METRICS_UPDATE_PREFIX = "accumulo.tserver.updates.";
  String METRICS_UPDATE_ERRORS = METRICS_UPDATE_PREFIX + "error";
  String METRICS_UPDATE_COMMIT = METRICS_UPDATE_PREFIX + "commit";
  String METRICS_UPDATE_COMMIT_PREP = METRICS_UPDATE_COMMIT + ".prep";
  String METRICS_UPDATE_WALOG_WRITE = METRICS_UPDATE_PREFIX + "walog.write";
  String METRICS_UPDATE_MUTATION_ARRAY_SIZE = METRICS_UPDATE_PREFIX + "mutation.arrays.size";

  // ZooKeeper property store
  String METRICS_PROPSTORE_PREFIX = "accumulo.prop.store.";
  String METRICS_PROPSTORE_LOAD_TIMER = METRICS_PROPSTORE_PREFIX + "load";
  String METRICS_PROPSTORE_REFRESH_COUNT = METRICS_PROPSTORE_PREFIX + "refresh";
  String METRICS_PROPSTORE_REFRESH_LOAD_COUNT = METRICS_PROPSTORE_PREFIX + "refresh.load";
  String METRICS_PROPSTORE_EVICTION_COUNT = METRICS_PROPSTORE_PREFIX + "evictions";
  String METRICS_PROPSTORE_ZK_ERROR_COUNT = METRICS_PROPSTORE_PREFIX + "zookeeper.error";

  /**
   * Build Micrometer Meter objects and register them with the registry
   */
  void registerMetrics(MeterRegistry registry);

  /**
   * Returns a new mutable mapping of metric field value to metric field name.
   *
   * @return map of field names to variable names.
   */
  default Map<String,String> getMetricFields() {
    Map<String,String> fields = new HashMap<>();
    for (Field f : MetricsProducer.class.getDeclaredFields()) {
      // Only the String metric-name constants are of interest; the *_PREFIX building blocks
      // are implementation details and are excluded.
      if (Modifier.isStatic(f.getModifiers()) && f.getType().equals(String.class)
          && !f.getName().contains("PREFIX")) {
        try {
          fields.put((String) f.get(MetricsProducer.class), f.getName());
        } catch (IllegalArgumentException | IllegalAccessException e) {
          // Should not happen: interface fields are public static final. Use parameterized
          // logging and keep the exception as the last argument so the stack trace is logged.
          LOG.error("Error getting metric value for field: {}", f.getName(), e);
        }
      }
    }
    return fields;
  }
}
| 9,521 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/metrics/MeterRegistryFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.metrics;
import io.micrometer.core.instrument.MeterRegistry;
/**
 * SPI for supplying the Micrometer {@link MeterRegistry} used by an Accumulo process.
 * Implementations are loaded reflectively by name (see the metrics initialization code), so they
 * need a public no-argument constructor.
 */
public interface MeterRegistryFactory {
  /**
   * @return a new registry instance that metrics will be published to
   */
  MeterRegistry create();
}
| 9,522 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/metrics/MetricsUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.metrics;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Metrics;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.binder.jvm.ClassLoaderMetrics;
import io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics;
import io.micrometer.core.instrument.binder.jvm.JvmGcMetrics;
import io.micrometer.core.instrument.binder.jvm.JvmMemoryMetrics;
import io.micrometer.core.instrument.binder.jvm.JvmThreadMetrics;
import io.micrometer.core.instrument.binder.system.ProcessorMetrics;
/**
 * Process-wide Micrometer setup for Accumulo services: loads the configured
 * {@link MeterRegistryFactory}, registers the registry with the global {@code Metrics} registry,
 * applies common tags, and optionally binds the standard JVM/system meter binders.
 */
public class MetricsUtil {

  private static final Logger LOG = LoggerFactory.getLogger(MetricsUtil.class);

  // Kept so close() can release the GC notification listeners that JvmGcMetrics installs.
  private static JvmGcMetrics gc;
  // Tags added to every meter. Remains null until initializeMetrics(...) runs with metrics
  // enabled. NOTE(review): getCommonTags()/addExecutorServiceMetrics() can observe null before
  // initialization — confirm callers only invoke them after startup.
  private static List<Tag> commonTags;

  /**
   * Reads the metrics settings from the given configuration and delegates to the private
   * initializer. The reflective exceptions come from instantiating the configured factory class.
   *
   * @param conf source of the micrometer enabled/factory properties
   * @param appName process name used for the "process.name" tag
   * @param address advertised host/port, used for the "host"/"port" tags (may be null)
   */
  public static void initializeMetrics(final AccumuloConfiguration conf, final String appName,
      final HostAndPort address) throws ClassNotFoundException, InstantiationException,
      IllegalAccessException, IllegalArgumentException, InvocationTargetException,
      NoSuchMethodException, SecurityException {
    initializeMetrics(conf.getBoolean(Property.GENERAL_MICROMETER_ENABLED),
        conf.getBoolean(Property.GENERAL_MICROMETER_JVM_METRICS_ENABLED),
        conf.get(Property.GENERAL_MICROMETER_FACTORY), appName, address);
  }

  private static void initializeMetrics(boolean enabled, boolean jvmMetricsEnabled,
      String factoryClass, String appName, HostAndPort address) throws ClassNotFoundException,
      InstantiationException, IllegalAccessException, IllegalArgumentException,
      InvocationTargetException, NoSuchMethodException, SecurityException {

    LOG.info("initializing metrics, enabled:{}, class:{}", enabled, factoryClass);

    // No-op unless metrics are enabled AND a factory class is configured.
    if (enabled && factoryClass != null && !factoryClass.isEmpty()) {

      String processName = appName;
      // Optional suffix to disambiguate multiple instances of the same service on one host.
      String serviceInstance = System.getProperty("accumulo.metrics.service.instance", "");
      if (!serviceInstance.isBlank()) {
        processName += serviceInstance;
      }

      List<Tag> tags = new ArrayList<>();
      tags.add(Tag.of("process.name", processName));

      // Host/port tags are only added when actually known.
      if (address != null) {
        if (!address.getHost().isEmpty()) {
          tags.add(Tag.of("host", address.getHost()));
        }
        if (address.getPort() > 0) {
          tags.add(Tag.of("port", Integer.toString(address.getPort())));
        }
      }
      commonTags = Collections.unmodifiableList(tags);

      // Instantiate the configured factory via the Accumulo classloader and publish its
      // registry through Micrometer's global composite registry.
      Class<? extends MeterRegistryFactory> clazz =
          ClassLoaderUtil.loadClass(factoryClass, MeterRegistryFactory.class);
      MeterRegistryFactory factory = clazz.getDeclaredConstructor().newInstance();

      MeterRegistry registry = factory.create();
      registry.config().commonTags(commonTags);
      Metrics.addRegistry(registry);

      if (jvmMetricsEnabled) {
        // Standard JVM/system binders; gc is retained for cleanup in close().
        new ClassLoaderMetrics(commonTags).bindTo(Metrics.globalRegistry);
        new JvmMemoryMetrics(commonTags).bindTo(Metrics.globalRegistry);
        gc = new JvmGcMetrics(commonTags);
        gc.bindTo(Metrics.globalRegistry);
        new ProcessorMetrics(commonTags).bindTo(Metrics.globalRegistry);
        new JvmThreadMetrics(commonTags).bindTo(Metrics.globalRegistry);
      }
    }
  }

  /**
   * Asks each producer to register its meters with the global registry.
   */
  public static void initializeProducers(MetricsProducer... producer) {
    for (MetricsProducer p : producer) {
      p.registerMetrics(Metrics.globalRegistry);
      LOG.info("Metric producer {} initialize", p.getClass().getSimpleName());
    }
  }

  /**
   * Binds executor-service meters (queue depth, active tasks, ...) for the given executor.
   */
  public static void addExecutorServiceMetrics(ExecutorService executor, String name) {
    new ExecutorServiceMetrics(executor, name, commonTags).bindTo(Metrics.globalRegistry);
  }

  /**
   * @return the common tags applied to all meters; null if metrics were never initialized
   */
  public static List<Tag> getCommonTags() {
    return commonTags;
  }

  /**
   * Releases the GC metrics listeners, if they were bound.
   */
  public static void close() {
    if (gc != null) {
      gc.close();
    }
  }
}
| 9,523 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto/CryptoUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Objects;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.spi.crypto.FileDecrypter;
import org.apache.commons.io.IOUtils;
public class CryptoUtils {

  /**
   * Reads length-prefixed decryption parameters from the stream: a 4-byte length followed by
   * exactly that many bytes.
   */
  public static byte[] readParams(DataInputStream in) throws IOException {
    Objects.requireNonNull(in);
    byte[] params = new byte[in.readInt()];
    IOUtils.readFully(in, params);
    return params;
  }

  /**
   * Builds a {@link CryptoEnvironment} for the given scope and table by consuming the
   * length-prefixed decryption parameters from the stream.
   */
  public static CryptoEnvironment getCryptoEnv(CryptoEnvironment.Scope scope, TableId tableId,
      DataInputStream in) throws IOException {
    return new CryptoEnvironmentImpl(scope, tableId, readParams(in));
  }

  /**
   * Reads the decryption parameters from the stream and returns the matching
   * {@link FileDecrypter} from the supplied crypto service.
   */
  public static FileDecrypter getFileDecrypter(CryptoService cs, CryptoEnvironment.Scope scope,
      TableId tableId, DataInputStream in) throws IOException {
    return cs.getFileDecrypter(getCryptoEnv(scope, tableId, in));
  }

  /**
   * Writes the decryption parameters to the stream in the length-prefixed form that
   * {@link #readParams(DataInputStream)} expects.
   */
  public static void writeParams(byte[] decryptionParams, DataOutputStream out) throws IOException {
    Objects.requireNonNull(decryptionParams);
    Objects.requireNonNull(out);
    out.writeInt(decryptionParams.length);
    out.write(decryptionParams);
  }
}
| 9,524 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto/CryptoEnvironmentImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto;
import java.util.Objects;
import java.util.Optional;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import edu.umd.cs.findbugs.annotations.Nullable;
/**
* @since 2.0
*/
public class CryptoEnvironmentImpl implements CryptoEnvironment {

  private final Scope scope;
  private final TableId tableId;
  private final byte[] decryptionParams;

  /**
   * Construct the crypto environment. The decryptionParams can be null.
   */
  public CryptoEnvironmentImpl(Scope scope, @Nullable TableId tableId,
      @Nullable byte[] decryptionParams) {
    this.scope = Objects.requireNonNull(scope);
    this.tableId = tableId;
    this.decryptionParams = decryptionParams;
  }

  /**
   * Environment with only a scope; table id and decryption params are absent. Note: unlike the
   * full constructor, this one performs no null check on scope.
   */
  public CryptoEnvironmentImpl(Scope scope) {
    this.scope = scope;
    this.tableId = null;
    this.decryptionParams = null;
  }

  @Override
  public Scope getScope() {
    return scope;
  }

  @Override
  public Optional<TableId> getTableId() {
    return Optional.ofNullable(tableId);
  }

  @Override
  public Optional<byte[]> getDecryptionParams() {
    return Optional.ofNullable(decryptionParams);
  }

  @Override
  public String toString() {
    int paramsLen = decryptionParams == null ? 0 : decryptionParams.length;
    return scope + " tableId=" + tableId + " decryptParams.length=" + paramsLen;
  }
}
| 9,525 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto/CryptoFactoryLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto;
import static org.apache.accumulo.core.crypto.CryptoFactoryLoader.ClassloaderType.ACCUMULO;
import static org.apache.accumulo.core.crypto.CryptoFactoryLoader.ClassloaderType.JAVA;
import static org.apache.accumulo.core.spi.crypto.CryptoEnvironment.Scope.TABLE;
import java.util.Map;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.accumulo.core.spi.crypto.CryptoServiceFactory;
import org.apache.accumulo.core.spi.crypto.GenericCryptoServiceFactory;
import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CryptoFactoryLoader {
  private static final Logger log = LoggerFactory.getLogger(CryptoFactoryLoader.class);
  private static final CryptoServiceFactory NO_CRYPTO_FACTORY = new NoCryptoServiceFactory();

  enum ClassloaderType {
    // Use the Accumulo custom classloader. Should only be used by Accumulo server side code.
    ACCUMULO,
    // Use basic Java classloading mechanism. Should be use by Accumulo client code.
    JAVA
  }

  /**
   * Creates a new server Factory.
   */
  public static CryptoServiceFactory newInstance(AccumuloConfiguration conf) {
    return loadCryptoFactory(ACCUMULO, conf.get(Property.INSTANCE_CRYPTO_FACTORY));
  }

  /**
   * For use by server utilities not associated with a table. Requires Instance, general and table
   * configuration. Creates a new Factory from the configuration and gets the CryptoService from
   * that Factory.
   */
  public static CryptoService getServiceForServer(AccumuloConfiguration conf) {
    CryptoServiceFactory factory = newInstance(conf);
    return factory.getService(new CryptoEnvironmentImpl(TABLE, null, null),
        conf.getAllCryptoProperties());
  }

  /**
   * Returns a CryptoService configured for the scope using the properties. This is used for client
   * operations not associated with a table, either for r-files (TABLE scope) or WALs. The
   * GenericCryptoServiceFactory is used for loading the CryptoService.
   */
  public static CryptoService getServiceForClient(CryptoEnvironment.Scope scope,
      Map<String,String> properties) {
    CryptoEnvironment env = new CryptoEnvironmentImpl(scope, null, null);
    return loadCryptoFactory(JAVA, GenericCryptoServiceFactory.class.getName()).getService(env,
        properties);
  }

  /**
   * For use by client code, in a Table context.
   */
  public static CryptoService getServiceForClientWithTable(Map<String,String> systemConfig,
      Map<String,String> tableProps, TableId tableId) {
    String configuredFactory = systemConfig.get(Property.INSTANCE_CRYPTO_FACTORY.getKey());
    if (configuredFactory == null || configuredFactory.trim().isEmpty()) {
      // Nothing configured: fall back to the no-op crypto service.
      return NoCryptoServiceFactory.NONE;
    }
    CryptoServiceFactory factory = loadCryptoFactory(JAVA, configuredFactory);
    return factory.getService(new CryptoEnvironmentImpl(TABLE, tableId, null), tableProps);
  }

  /**
   * Instantiates the named factory with the requested classloading mechanism. For {@code JAVA}, a
   * null/blank class name yields the shared no-op factory.
   */
  private static CryptoServiceFactory loadCryptoFactory(ClassloaderType ct, String clazzName) {
    log.debug("Creating new crypto factory class {}", clazzName);
    switch (ct) {
      case ACCUMULO:
        return ConfigurationTypeHelper.getClassInstance(null, clazzName,
            CryptoServiceFactory.class, new NoCryptoServiceFactory());
      case JAVA:
        if (clazzName == null || clazzName.trim().isEmpty()) {
          return NO_CRYPTO_FACTORY;
        }
        try {
          return CryptoFactoryLoader.class.getClassLoader().loadClass(clazzName)
              .asSubclass(CryptoServiceFactory.class).getDeclaredConstructor().newInstance();
        } catch (ReflectiveOperationException e) {
          throw new RuntimeException(e);
        }
      default:
        throw new IllegalArgumentException();
    }
  }
}
| 9,526 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto/streams/NoFlushOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto.streams;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
public class NoFlushOutputStream extends DataOutputStream {
public NoFlushOutputStream(OutputStream out) {
super(out);
}
/**
* It is very important to override this method!! The underlying method from FilterOutputStream
* calls write a single byte at a time and will kill performance.
*/
@Override
public void write(byte[] b, int off, int len) throws IOException {
out.write(b, off, len);
}
@Override
public void flush() {}
}
| 9,527 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto/streams/RFileCipherOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto.streams;
import java.io.IOException;
import java.io.OutputStream;
import javax.crypto.Cipher;
import javax.crypto.CipherOutputStream;
/**
* This class extends {@link CipherOutputStream} to include a way to track the number of bytes that
* have been encrypted by the stream. The write method also includes a mechanism to stop writing and
* throw an exception if exceeding a maximum number of bytes is attempted.
*/
public class RFileCipherOutputStream extends CipherOutputStream {

  // This is the maximum size encrypted stream that can be written. Attempting to write anything
  // larger will cause an exception. Given that each block in an rfile is encrypted separately,
  // and blocks should be written such that a block cannot ever reach 16GiB, this is believed to
  // be a safe number. If this does cause an exception, it is an issue best addressed elsewhere.
  private static final long MAX_OUTPUT_SIZE = 1L << 34; // 16GiB

  // The total number of bytes that have been written out
  private long count = 0;

  /**
   * Constructs a RFileCipherOutputStream
   *
   * @param os the OutputStream object
   * @param c an initialized Cipher object
   */
  public RFileCipherOutputStream(OutputStream os, Cipher c) {
    super(os, c);
  }

  /**
   * Adds {@code written} to the running byte count and fails if the stream limit is exceeded.
   * Centralizes the check that was previously duplicated in both write overrides.
   *
   * @throws IOException if the cumulative count exceeds {@link #MAX_OUTPUT_SIZE}
   */
  private void addToCountAndCheck(long written) throws IOException {
    count += written;
    if (count > MAX_OUTPUT_SIZE) {
      throw new IOException("Attempt to write " + count + " bytes was made. A maximum of "
          + MAX_OUTPUT_SIZE + " is allowed for an encryption stream.");
    }
  }

  /**
   * Override of CipherOutputStream's write to count the number of bytes that have been encrypted.
   * This method now throws an exception if an attempt to write bytes beyond a maximum is made.
   */
  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    addToCountAndCheck(len);
    super.write(b, off, len);
  }

  @Override
  public void write(byte[] b) throws IOException {
    write(b, 0, b.length);
  }

  /**
   * Override of CipherOutputStream's write for a single byte to count it. This method now throws
   * an exception if an attempt to write bytes beyond a maximum is made.
   */
  @Override
  public void write(int b) throws IOException {
    addToCountAndCheck(1);
    super.write(b);
  }
}
| 9,528 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/crypto/streams/BlockedInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto.streams;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
/**
* Reader corresponding to BlockedOutputStream. Expects all data to be in the form of size (int)
* data (size bytes) junk (however many bytes it takes to complete a block)
*/
public class BlockedInputStream extends InputStream {
  // Circular buffer holding payload bytes of the blocks decoded so far.
  byte[] array;
  // ReadPos is where to start reading
  // WritePos is the last position written to
  int readPos, writePos;
  DataInputStream in;
  int blockSize;
  // Latched true on EOF or a malformed size header; all further reads return EOF.
  boolean finished = false;

  /**
   * @param in source stream containing records in the BlockedOutputStream format:
   *        4-byte size, payload, then padding up to a multiple of blockSize
   * @param blockSize alignment of each record; must be nonzero
   * @param maxSize capacity of the internal circular buffer, i.e. the largest payload accepted
   */
  public BlockedInputStream(InputStream in, int blockSize, int maxSize) {
    if (blockSize == 0) {
      throw new IllegalArgumentException("Invalid block size");
    }
    // Avoid double-wrapping when the caller already supplies a DataInputStream.
    if (in instanceof DataInputStream) {
      this.in = (DataInputStream) in;
    } else {
      this.in = new DataInputStream(in);
    }

    array = new byte[maxSize];
    readPos = 0;
    writePos = -1; // empty buffer: nothing written yet

    this.blockSize = blockSize;
  }

  /**
   * Returns the next buffered byte (0-255), refilling from the underlying stream when the buffer
   * is drained; -1 on EOF.
   */
  @Override
  public int read() throws IOException {
    if (remaining() > 0) {
      // Mask to return the byte as an unsigned value.
      return (array[readAndIncrement(1)] & 0xFF);
    }
    return -1;
  }

  // Returns the current read position, then advances it by toAdd, wrapping at the end of the
  // circular buffer. Callers must never advance past the buffer length (only onto the boundary).
  private int readAndIncrement(int toAdd) {
    int toRet = readPos;
    readPos += toAdd;
    if (readPos == array.length) {
      readPos = 0;
    } else if (readPos > array.length) {
      throw new IllegalStateException(
          "Unexpected state, this should only ever increase or cycle on the boundary!");
    }
    return toRet;
  }

  /**
   * Copies up to len buffered bytes into b; returns the number copied. Note this copies at most
   * the contiguous run up to the buffer end (see available()).
   */
  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    int toCopy = Math.min(len, remaining());
    if (toCopy > 0) {
      System.arraycopy(array, readPos, b, off, toCopy);
      readAndIncrement(toCopy);
    }
    return toCopy;
  }

  // Bytes readable right now; refills from the stream if the buffer is empty. Returns -1 once
  // the stream is finished.
  private int remaining() throws IOException {
    if (finished) {
      return -1;
    }
    if (available() == 0) {
      refill();
    }
    return available();
  }

  // Amount available to read
  @Override
  public int available() {
    // Total buffered bytes, accounting for wrap-around...
    int toRet = writePos + 1 - readPos;
    if (toRet < 0) {
      toRet += array.length;
    }
    // ...but capped at the contiguous run from readPos to the end of the buffer.
    return Math.min(array.length - readPos, toRet);
  }

  // Reads one record (size header + payload) into the circular buffer and skips its padding.
  // Returns false on EOF, garbage size, or when the padding is not yet fully available.
  private boolean refill() throws IOException {
    if (finished) {
      return false;
    }
    // # of bytes in this block
    int size;
    try {
      size = in.readInt();
    } catch (EOFException eof) {
      finished = true;
      return false;
    }

    // Shortcut for if we're reading garbage data
    if (size < 0 || size > array.length) {
      finished = true;
      return false;
    } else if (size == 0) {
      throw new IllegalStateException(
          "Empty block written, this shouldn't happen with this BlockedOutputStream.");
    }

    // We have already checked, not concerned with looping the buffer here
    int bufferAvailable = array.length - readPos;
    // If the payload would run past the end of the buffer, split the read across the wrap point.
    if (size > bufferAvailable) {
      in.readFully(array, writePos + 1, bufferAvailable);
      in.readFully(array, 0, size - bufferAvailable);
    } else {
      in.readFully(array, writePos + 1, size);
    }
    writePos += size;
    // Wrap the write position back into range after crossing the buffer boundary.
    if (writePos >= array.length - 1) {
      writePos -= array.length;
    }

    // Skip the cruft: padding that rounds (size + 4-byte header) up to a blockSize multiple.
    int remainder = blockSize - ((size + 4) % blockSize);
    if (remainder != blockSize) {
      // If remainder isn't spilling the rest of the block, we know it's incomplete.
      // NOTE(review): available() is a best-effort estimate and skip()'s return value is
      // ignored here — presumably acceptable for the expected sources; confirm.
      if (in.available() < remainder) {
        undoWrite(size);
        return false;
      }
      in.skip(remainder);
    }

    return true;
  }

  // Rolls back the last payload write (used when a record's padding is incomplete), wrapping the
  // write position if the payload had crossed the buffer boundary.
  private void undoWrite(int size) {
    writePos = writePos - size;
    if (writePos < -1) {
      writePos += array.length;
    }
  }

  /** Not supported for this stream. */
  @Override
  public long skip(long n) {
    throw new UnsupportedOperationException();
    // available(n);
    // bb.position(bb.position()+(int)n);
  }

  @Override
  public void close() throws IOException {
    array = null;
    in.close();
  }

  /** Mark is not supported (see markSupported()). */
  @Override
  public synchronized void mark(int readlimit) {
    throw new UnsupportedOperationException();
  }

  /**
   * Resets the underlying stream and empties the circular buffer. Only meaningful if the wrapped
   * stream itself supports reset.
   */
  @Override
  public synchronized void reset() throws IOException {
    in.reset();
    readPos = 0;
    writePos = -1;
  }

  @Override
  public boolean markSupported() {
    return false;
  }
}
| 9,529 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto.streams;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
* Buffers all input in a growing buffer until flush() is called. Then entire buffer is written,
* with size information, and padding to force the underlying crypto output stream to also fully
* flush
*/
public class BlockedOutputStream extends OutputStream {
int blockSize;
DataOutputStream out;
ByteBuffer bb;
public BlockedOutputStream(OutputStream out, int blockSize, int bufferSize) {
if (bufferSize <= 0) {
throw new IllegalArgumentException("bufferSize must be greater than 0.");
}
if (out instanceof DataOutputStream) {
this.out = (DataOutputStream) out;
} else {
this.out = new DataOutputStream(out);
}
this.blockSize = blockSize;
int remainder = bufferSize % blockSize;
if (remainder != 0) {
remainder = blockSize - remainder;
}
// some buffer space + bytes to make the buffer evened up with the cipher block size - 4 bytes
// for the size int
bb = ByteBuffer.allocate(bufferSize + remainder - 4);
}
@Override
public synchronized void flush() throws IOException {
if (!bb.hasArray()) {
throw new IllegalStateException("BlockedOutputStream has no backing array.");
}
int size = bb.position();
if (size == 0) {
return;
}
out.writeInt(size);
int remainder = ((size + 4) % blockSize);
if (remainder != 0) {
remainder = blockSize - remainder;
}
// This is garbage
bb.position(size + remainder);
out.write(bb.array(), 0, size + remainder);
out.flush();
bb.rewind();
}
@Override
public void write(int b) throws IOException {
// Checking before provides same functionality but causes the case of previous flush() failing
// to now throw a buffer out of bounds error
if (bb.remaining() == 0) {
flush();
}
bb.put((byte) b);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
// Can't recurse here in case the len is large and the blocksize is small (and the stack is
// small)
// So we'll just fill up the buffer over and over
while (len >= bb.remaining()) {
int remaining = bb.remaining();
bb.put(b, off, remaining);
// This is guaranteed to have the buffer filled, so we'll just flush it. No check needed
flush();
off += remaining;
len -= remaining;
}
// And then write the remainder (and this is guaranteed to not fill the buffer, so we won't
// flush afterward
bb.put(b, off, len);
}
@Override
public void write(byte[] b) throws IOException {
write(b, 0, b.length);
}
@Override
public void close() throws IOException {
flush();
out.close();
}
}
| 9,530 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.crypto.streams;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DiscardCloseOutputStream extends FilterOutputStream {
  private static final Logger log = LoggerFactory.getLogger(DiscardCloseOutputStream.class);

  /**
   * @param out the underlying stream; it is never closed (nor flushed on close) by this wrapper
   */
  public DiscardCloseOutputStream(OutputStream out) {
    super(out);
  }

  /**
   * It is very important to override this method!! The underlying method from FilterOutputStream
   * calls write a single byte at a time and will kill performance.
   */
  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    out.write(b, off, len);
  }

  /**
   * Intentionally does NOT close or flush the underlying stream; close() only logs. Callers
   * that need buffered data written out must call flush() themselves.
   */
  @Override
  public void close() throws IOException {
    // Discard
    log.trace("Discarded close");
  }
}
| 9,531 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.singletons;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* This class automates management of static singletons that maintain state for Accumulo clients.
* Historically, Accumulo client code that used Connector had no control over these singletons. The
* new AccumuloClient API that replaces Connector is closeable. When all AccumuloClients are closed
* then resources used by the singletons are released. This class coordinates releasing those
* resources.
*
* <p>
* This class is intermediate solution to resource management. Ideally there would be no static
* state and AccumuloClients would own all of their state and clean it up on close. If
* AccumuloClient is not closable at inception, then it is harder to make it closable later. If
* AccumuloClient is not closable, then its hard to remove the static state. This class enables
* making AccumuloClient closable at inception so that static state can be removed later.
*
*/
public class SingletonManager {

  private static final Logger log = LoggerFactory.getLogger(SingletonManager.class);

  /**
   * These enums determine the behavior of the SingletonManager.
   */
  public enum Mode {
    /**
     * In this mode singletons are disabled when the number of active client reservations goes to
     * zero.
     */
    CLIENT,
    /**
     * In this mode singletons are never disabled, unless the CLOSED mode is entered.
     */
    SERVER,
    /**
     * In this mode singletons are permanently disabled and entering this mode prevents
     * transitioning to other modes.
     */
    CLOSED
  }

  private static long reservations;
  private static Mode mode;
  private static boolean enabled;
  private static List<SingletonService> services;

  @VisibleForTesting
  static void reset() {
    reservations = 0;
    mode = Mode.CLIENT;
    enabled = true;
    services = new ArrayList<>();
  }

  static {
    reset();
  }

  // Enable one service, containing any runtime failure so other services are unaffected.
  private static void tryEnable(SingletonService service) {
    try {
      service.enable();
    } catch (RuntimeException e) {
      log.error("Failed to enable singleton service", e);
    }
  }

  // Disable one service, containing any runtime failure so other services are unaffected.
  private static void tryDisable(SingletonService service) {
    try {
      service.disable();
    } catch (RuntimeException e) {
      log.error("Failed to disable singleton service", e);
    }
  }

  /**
   * Register a static singleton that should be disabled and enabled as needed.
   */
  public static synchronized void register(SingletonService service) {
    // Bring the new service in line with the current global state before tracking it.
    if (service.isEnabled() != enabled) {
      if (enabled) {
        tryEnable(service);
      } else {
        tryDisable(service);
      }
    }
    services.add(service);
  }

  /**
   * This method should be called when creating Accumulo clients using the public API. Accumulo
   * clients created internally within Accumulo code should probably call
   * {@link SingletonReservation#noop()} instead. While a client holds a reservation, singleton
   * services are enabled.
   *
   * @return A reservation that must be closed when the AccumuloClient is closed.
   */
  public static synchronized SingletonReservation getClientReservation() {
    Preconditions.checkState(reservations >= 0);
    reservations++;
    transition();
    return new SingletonReservation();
  }

  static synchronized void releaseReservation() {
    Preconditions.checkState(reservations > 0);
    reservations--;
    transition();
  }

  @VisibleForTesting
  public static long getReservationCount() {
    return reservations;
  }

  /**
   * Change how singletons are managed. The default mode is {@link Mode#CLIENT}
   */
  public static synchronized void setMode(Mode newMode) {
    if (SingletonManager.mode == newMode) {
      return;
    }
    if (SingletonManager.mode == Mode.CLOSED) {
      throw new IllegalStateException("Cannot leave closed mode once entered");
    }
    // CLOSED can always be entered; otherwise SERVER mode is sticky and cannot be left.
    if (SingletonManager.mode != Mode.SERVER || newMode == Mode.CLOSED) {
      SingletonManager.mode = newMode;
    }
    transition();
  }

  @VisibleForTesting
  public static synchronized Mode getMode() {
    return mode;
  }

  // Reconcile the actual enabled/disabled state of all registered services with the state
  // implied by the current mode and reservation count.
  private static void transition() {
    boolean shouldBeEnabled = mode == Mode.SERVER || (mode == Mode.CLIENT && reservations > 0);
    if (enabled && !shouldBeEnabled) {
      services.forEach(SingletonManager::tryDisable);
      enabled = false;
    } else if (!enabled && shouldBeEnabled) {
      services.forEach(SingletonManager::tryEnable);
      enabled = true;
    }
  }
}
| 9,532 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.singletons;
import java.lang.ref.Cleaner.Cleanable;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.util.cleaner.CleanerUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @see SingletonManager#getClientReservation()
*/
public class SingletonReservation implements AutoCloseable {

  private static final Logger log = LoggerFactory.getLogger(SingletonReservation.class);

  // AtomicBoolean so the cleaner can read the state reliably without synchronizing on this.
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final Cleanable cleanable;

  public SingletonReservation() {
    cleanable = CleanerUtil.unclosed(this, AccumuloClient.class, closed, log, null);
  }

  @Override
  public void close() {
    if (!closed.compareAndSet(false, true)) {
      // Already closed; close() is idempotent.
      return;
    }
    // Deregister the cleanable; its action would be a no-op anyway because it checks closed,
    // which is now true.
    cleanable.clean();
    SingletonManager.releaseReservation();
  }

  private static class NoopSingletonReservation extends SingletonReservation {
    NoopSingletonReservation() {
      // Mark closed up front and drop the cleaner registration so close() never releases a
      // reservation.
      super.closed.set(true);
      super.cleanable.clean();
    }
  }

  private static final SingletonReservation NOOP = new NoopSingletonReservation();

  /**
   * @return A reservation where the close method is a no-op.
   */
  public static SingletonReservation noop() {
    return NOOP;
  }
}
| 9,533 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.singletons;
/**
* The {@link SingletonManager} uses this interface to enable and disable singleton services.
*
* @see SingletonManager#register(SingletonService)
*/
public interface SingletonService {

  /**
   * @return true if the service is currently enabled
   */
  boolean isEnabled();

  /**
   * Enable the service.
   */
  void enable();

  /**
   * Disable the service, releasing any resources it holds.
   */
  void disable();
}
| 9,534 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.data.constraints.DefaultKeySizeConstraint;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.user.VersioningIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility class for configuring iterators. These methods were moved from IteratorUtil so that it
* could be treated as API.
*/
public class IteratorConfigUtil {

  private static final Logger log = LoggerFactory.getLogger(IteratorConfigUtil.class);

  // Sorts IterInfo entries by ascending priority.
  public static final Comparator<IterInfo> ITER_INFO_COMPARATOR =
      Comparator.comparingInt(IterInfo::getPriority);

  /**
   * Fetch the correct configuration key prefix for the given scope. Throws an
   * IllegalStateException if no property exists for the given scope.
   */
  public static Property getProperty(IteratorScope scope) {
    requireNonNull(scope);
    switch (scope) {
      case scan:
        return Property.TABLE_ITERATOR_SCAN_PREFIX;
      case minc:
        return Property.TABLE_ITERATOR_MINC_PREFIX;
      case majc:
        return Property.TABLE_ITERATOR_MAJC_PREFIX;
      default:
        throw new IllegalStateException("Could not find configuration property for IteratorScope");
    }
  }

  /**
   * Generate the initial (default) properties for a table
   *
   * @param limitVersion include a VersioningIterator at priority 20 that retains a single version
   *        of a given K/V pair.
   * @return A map of Table properties
   */
  public static Map<String,String> generateInitialTableProperties(boolean limitVersion) {
    TreeMap<String,String> props = new TreeMap<>();
    if (limitVersion) {
      for (IteratorScope iterScope : IteratorScope.values()) {
        props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers",
            "20," + VersioningIterator.class.getName());
        props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers.opt.maxVersions", "1");
      }
    }
    props.put(Property.TABLE_CONSTRAINT_PREFIX + "1", DefaultKeySizeConstraint.class.getName());
    return props;
  }

  /**
   * Parse iterator settings for the given scope out of {@code conf} and merge them with the
   * supplied list. Iterator entries have the form {@code <prefix>.<name> = <prio>,<class>};
   * option entries have the form {@code <prefix>.<name>.opt.<optName> = <value>}.
   *
   * @param scope the scope whose configured iterators are read
   * @param iters additional iterators to include in the result
   * @param allOptions output parameter: per-iterator option maps are accumulated here
   * @param conf configuration to read iterator properties from
   * @return all iterators, sorted by ascending priority
   * @throws IllegalArgumentException if a property suffix matches neither recognized form
   */
  public static List<IterInfo> parseIterConf(IteratorScope scope, List<IterInfo> iters,
      Map<String,Map<String,String>> allOptions, AccumuloConfiguration conf) {
    Map<String,String> properties = conf.getAllPropertiesWithPrefix(getProperty(scope));
    ArrayList<IterInfo> iterators = new ArrayList<>(iters);
    final Property scopeProperty = getProperty(scope);
    final String scopePropertyKey = scopeProperty.getKey();
    for (Entry<String,String> entry : properties.entrySet()) {
      String suffix = entry.getKey().substring(scopePropertyKey.length());
      String[] suffixSplit = suffix.split("\\.", 3);
      if (suffixSplit.length == 1) {
        // "<name> = <prio>,<class>" declares an iterator.
        String[] sa = entry.getValue().split(",");
        int prio = Integer.parseInt(sa[0]);
        String className = sa[1];
        iterators.add(new IterInfo(prio, className, suffixSplit[0]));
      } else if (suffixSplit.length == 3 && suffixSplit[1].equals("opt")) {
        // "<name>.opt.<optName> = <value>" declares an option for the named iterator.
        String iterName = suffixSplit[0];
        String optName = suffixSplit[2];
        allOptions.computeIfAbsent(iterName, k -> new HashMap<>()).put(optName, entry.getValue());
      } else {
        throw new IllegalArgumentException("Invalid iterator format: " + entry.getKey());
      }
    }
    iterators.sort(ITER_INFO_COMPARATOR);
    return iterators;
  }

  /**
   * Merge table-configured iterators/options with scan-session iterators/options into the
   * destination list and map. The destination list ends up sorted by ascending priority; table
   * option maps are defensively copied, then scan-session options are overlaid on top.
   */
  public static void mergeIteratorConfig(List<IterInfo> destList,
      Map<String,Map<String,String>> destOpts, List<IterInfo> tableIters,
      Map<String,Map<String,String>> tableOpts, List<IterInfo> ssi,
      Map<String,Map<String,String>> ssio) {
    destList.addAll(tableIters);
    destList.addAll(ssi);
    destList.sort(ITER_INFO_COMPARATOR);
    Set<Entry<String,Map<String,String>>> es = tableOpts.entrySet();
    for (Entry<String,Map<String,String>> entry : es) {
      if (entry.getValue() == null) {
        destOpts.put(entry.getKey(), null);
      } else {
        // Copy so later merging does not mutate the caller's table option maps.
        destOpts.put(entry.getKey(), new HashMap<>(entry.getValue()));
      }
    }
    mergeOptions(ssio, destOpts);
  }

  // Overlay ssio's per-iterator options onto allOptions; null option maps in ssio are skipped.
  private static void mergeOptions(Map<String,Map<String,String>> ssio,
      Map<String,Map<String,String>> allOptions) {
    ssio.forEach((k, v) -> {
      if (v != null) {
        Map<String,String> options = allOptions.get(k);
        if (options == null) {
          allOptions.put(k, v);
        } else {
          options.putAll(v);
        }
      }
    });
  }

  /**
   * Combine configured iterators for the scope with the supplied iterators/options, returning a
   * partially-built IteratorBuilder (caller still supplies env and class-loading choices).
   */
  public static IteratorBuilder.IteratorBuilderEnv loadIterConf(IteratorScope scope,
      List<IterInfo> iters, Map<String,Map<String,String>> iterOpts, AccumuloConfiguration conf) {
    Map<String,Map<String,String>> allOptions = new HashMap<>();
    List<IterInfo> iterators = parseIterConf(scope, iters, allOptions, conf);
    mergeOptions(iterOpts, allOptions);
    return IteratorBuilder.builder(iterators).opts(allOptions);
  }

  /**
   * Convert the list of iterators to IterInfo objects and then load the stack.
   */
  public static SortedKeyValueIterator<Key,Value> convertItersAndLoad(IteratorScope scope,
      SortedKeyValueIterator<Key,Value> source, AccumuloConfiguration conf,
      List<IteratorSetting> iterators, IteratorEnvironment env) throws IOException {
    List<IterInfo> ssiList = new ArrayList<>();
    Map<String,Map<String,String>> ssio = new HashMap<>();
    for (IteratorSetting is : iterators) {
      ssiList.add(new IterInfo(is.getPriority(), is.getIteratorClass(), is.getName()));
      ssio.put(is.getName(), is.getOptions());
    }
    var ibEnv = loadIterConf(scope, ssiList, ssio, conf);
    var iterBuilder = ibEnv.env(env).useClassLoader(ClassLoaderUtil.tableContext(conf)).build();
    return loadIterators(source, iterBuilder);
  }

  /**
   * Load a stack of iterators provided in the iterator builder, starting with source. Each
   * iterator is instantiated via its no-arg constructor and initialized with the previous
   * iterator as its source, so the returned iterator is the top of the stack.
   */
  public static SortedKeyValueIterator<Key,Value>
      loadIterators(SortedKeyValueIterator<Key,Value> source, IteratorBuilder iteratorBuilder)
          throws IOException {
    SortedKeyValueIterator<Key,Value> prev = source;
    final boolean useClassLoader = iteratorBuilder.useAccumuloClassLoader;
    // Optional per-call cache so repeated class names are resolved only once.
    Map<String,Class<SortedKeyValueIterator<Key,Value>>> classCache = new HashMap<>();
    try {
      for (IterInfo iterInfo : iteratorBuilder.iters) {
        Class<SortedKeyValueIterator<Key,Value>> clazz = null;
        log.trace("Attempting to load iterator class {}", iterInfo.className);
        if (iteratorBuilder.useClassCache) {
          clazz = classCache.get(iterInfo.className);
          if (clazz == null) {
            clazz = loadClass(useClassLoader, iteratorBuilder.context, iterInfo);
            classCache.put(iterInfo.className, clazz);
          }
        } else {
          clazz = loadClass(useClassLoader, iteratorBuilder.context, iterInfo);
        }
        SortedKeyValueIterator<Key,Value> skvi = clazz.getDeclaredConstructor().newInstance();
        Map<String,String> options = iteratorBuilder.iterOpts.get(iterInfo.iterName);
        if (options == null) {
          options = Collections.emptyMap();
        }
        skvi.init(prev, options, iteratorBuilder.iteratorEnvironment);
        prev = skvi;
      }
    } catch (ReflectiveOperationException e) {
      // NOTE(review): consider log.error("msg", e) to retain the stack trace in the log; the
      // exception is also rethrown below, so callers still see the full cause.
      log.error(e.toString());
      throw new IllegalStateException(e);
    }
    return prev;
  }

  // Resolve an iterator class either through the Accumulo (context) class loader or the plain
  // classpath, verifying it is a SortedKeyValueIterator subtype.
  private static Class<SortedKeyValueIterator<Key,Value>> loadClass(boolean useAccumuloClassLoader,
      String context, IterInfo iterInfo) throws ClassNotFoundException {
    if (useAccumuloClassLoader) {
      @SuppressWarnings("unchecked")
      var clazz = (Class<SortedKeyValueIterator<Key,Value>>) ClassLoaderUtil.loadClass(context,
          iterInfo.className, SortedKeyValueIterator.class);
      log.trace("Iterator class {} loaded from context {}, classloader: {}", iterInfo.className,
          context, clazz.getClassLoader());
      return clazz;
    }
    @SuppressWarnings("unchecked")
    var clazz = (Class<SortedKeyValueIterator<Key,Value>>) Class.forName(iterInfo.className)
        .asSubclass(SortedKeyValueIterator.class);
    log.trace("Iterator class {} loaded from classpath", iterInfo.className);
    return clazz;
  }
}
| 9,535 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
public class IteratorBuilderImpl
    implements IteratorBuilder.IteratorBuilderEnv, IteratorBuilder.IteratorBuilderOptions {

  // State collected by the builder chain; copied verbatim into IteratorBuilder by build().
  Collection<IterInfo> iters;
  Map<String,Map<String,String>> iterOpts;
  IteratorEnvironment iteratorEnvironment;
  boolean useAccumuloClassLoader = false;
  String context = null;
  boolean useClassCache = false;

  public IteratorBuilderImpl(Collection<IterInfo> iters) {
    this.iters = iters;
  }

  /** Record the per-iterator option maps and continue the builder chain. */
  public IteratorBuilder.IteratorBuilderEnv opts(Map<String,Map<String,String>> iterOpts) {
    this.iterOpts = iterOpts;
    return this;
  }

  /** Record the iterator environment and continue the builder chain. */
  @Override
  public IteratorBuilder.IteratorBuilderOptions env(IteratorEnvironment iteratorEnvironment) {
    this.iteratorEnvironment = iteratorEnvironment;
    return this;
  }

  /** Request class loading through the Accumulo class loader, with an optional (nullable) context. */
  @Override
  public IteratorBuilder.IteratorBuilderOptions useClassLoader(String context) {
    useAccumuloClassLoader = true;
    this.context = context;
    return this;
  }

  /** Toggle caching of loaded iterator classes. */
  @Override
  public IteratorBuilder.IteratorBuilderOptions useClassCache(boolean useClassCache) {
    this.useClassCache = useClassCache;
    return this;
  }

  /** Materialize the completed IteratorBuilder from the collected state. */
  @Override
  public IteratorBuilder build() {
    IteratorBuilder built = new IteratorBuilder();
    built.iters = iters;
    built.iterOpts = iterOpts;
    built.iteratorEnvironment = iteratorEnvironment;
    built.useAccumuloClassLoader = useAccumuloClassLoader;
    built.context = context;
    built.useClassCache = useClassCache;
    return built;
  }
}
| 9,536 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.dataImpl.thrift.IterInfo;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
/**
* Builder class for setting up the iterator stack.
*/
public class IteratorBuilder {
  // Package-visible fields, populated by IteratorBuilderImpl.build().
  Collection<IterInfo> iters;
  Map<String,Map<String,String>> iterOpts;
  IteratorEnvironment iteratorEnvironment;
  boolean useAccumuloClassLoader;
  String context = null;
  boolean useClassCache = false;

  IteratorBuilder() {}

  /**
   * Start building the iterator builder.
   */
  public static IteratorBuilderImpl builder(Collection<IterInfo> iters) {
    return new IteratorBuilderImpl(iters);
  }

  public interface IteratorBuilderEnv {
    /**
     * Set the iteratorEnvironment.
     */
    IteratorBuilderOptions env(IteratorEnvironment iteratorEnvironment);
  }

  public interface IteratorBuilderOptions extends IteratorBuilderEnv {
    /**
     * Option to cache iterator classes when loading; defaults to false.
     */
    IteratorBuilderOptions useClassCache(boolean useClassCache);

    /**
     * Call to use the class loader. The String context param is optional and can be null.
     */
    IteratorBuilderOptions useClassLoader(String context);

    /**
     * Finish building and return the completed IteratorBuilder.
     */
    IteratorBuilder build();
  }
}
| 9,537 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.blockfile.impl.CacheProvider;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.Text;
public class SequenceFileIterator implements FileSKVIterator {

  private Reader reader;
  private Value top_value;
  private Key top_key;
  private boolean readValue;

  @Override
  public SequenceFileIterator deepCopy(IteratorEnvironment env) {
    throw new UnsupportedOperationException("SequenceFileIterator does not yet support cloning");
  }

  @Override
  public void closeDeepCopies() throws IOException {
    throw new UnsupportedOperationException();
  }

  /**
   * Wraps a Hadoop SequenceFile reader as a forward-only FileSKVIterator.
   *
   * @param reader source of key/value pairs; closed by {@link #close()}
   * @param readValue if false, only keys are read and getTopValue() returns null
   * @throws IOException if reading the first key/value pair fails
   */
  public SequenceFileIterator(SequenceFile.Reader reader, boolean readValue) throws IOException {
    this.reader = reader;
    this.readValue = readValue;
    top_key = new Key();
    if (readValue) {
      top_value = new Value();
    }
    // Prime the iterator so hasTop()/getTopKey() are valid immediately.
    next();
  }

  @Override
  public Key getTopKey() {
    return top_key;
  }

  @Override
  public Value getTopValue() {
    return top_value;
  }

  @Override
  public boolean hasTop() {
    return top_key != null;
  }

  /** Advances to the next entry; on end of file, top key/value become null. */
  @Override
  public void next() throws IOException {
    boolean valid;
    if (readValue) {
      valid = reader.next(top_key, top_value);
    } else {
      valid = reader.next(top_key);
    }
    if (!valid) {
      top_key = null;
      top_value = null;
    }
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    throw new UnsupportedOperationException("seek() not supported");
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void close() throws IOException {
    reader.close();
  }

  @Override
  public Text getFirstRow() throws IOException {
    // Fixed message: previously reported "getFirstKey()", a method that does not exist here.
    throw new UnsupportedOperationException("getFirstRow() not supported");
  }

  @Override
  public Text getLastRow() throws IOException {
    // Fixed message: previously reported "getLastKey()", a method that does not exist here.
    throw new UnsupportedOperationException("getLastRow() not supported");
  }

  @Override
  public DataInputStream getMetaStore(String name) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    throw new UnsupportedOperationException();
  }

  @Override
  public FileSKVIterator getSample(SamplerConfigurationImpl sampleConfig) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void setCacheProvider(CacheProvider cacheProvider) {}
}
| 9,538 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/LocalityGroupIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.commons.lang3.mutable.MutableLong;
/**
 * A heap iterator over a set of locality groups. Given the column families requested by a seek,
 * only the locality groups that can contain those families are seeked and merged, avoiding
 * needless reads of unrelated groups. Used by both the in-memory map and RFile readers.
 */
public class LocalityGroupIterator extends HeapIterator implements InterruptibleIterator {
  // Groups are seeked with an empty, non-inclusive family set because group selection has
  // already filtered by family at this level.
  private static final Collection<ByteSequence> EMPTY_CF_SET = Collections.emptySet();
  /**
   * A single locality group: its iterator plus the column families it stores. The default group
   * has a null/implicit family set (it holds everything not in another group).
   */
  public static class LocalityGroup {
    // Deep-copy constructor: shares family metadata, deep copies the iterator.
    private LocalityGroup(LocalityGroup localityGroup, IteratorEnvironment env) {
      this(localityGroup.columnFamilies, localityGroup.isDefaultLocalityGroup);
      this.iterator = (InterruptibleIterator) localityGroup.iterator.deepCopy(env);
    }
    public LocalityGroup(InterruptibleIterator iterator,
        Map<ByteSequence,MutableLong> columnFamilies, boolean isDefaultLocalityGroup) {
      this(columnFamilies, isDefaultLocalityGroup);
      this.iterator = iterator;
    }
    public LocalityGroup(Map<ByteSequence,MutableLong> columnFamilies,
        boolean isDefaultLocalityGroup) {
      this.isDefaultLocalityGroup = isDefaultLocalityGroup;
      this.columnFamilies = columnFamilies;
    }
    public InterruptibleIterator getIterator() {
      return iterator;
    }
    protected boolean isDefaultLocalityGroup;
    // Family -> count; a family is considered present only when its count is > 0.
    protected Map<ByteSequence,MutableLong> columnFamilies;
    private InterruptibleIterator iterator;
  }
  /**
   * Immutable index over a group array: the full list, the (at most one) default group, and a
   * map from column family to the non-default group that stores it.
   */
  public static class LocalityGroupContext {
    final List<LocalityGroup> groups;
    final LocalityGroup defaultGroup;
    final Map<ByteSequence,LocalityGroup> groupByCf;
    public LocalityGroupContext(LocalityGroup[] groups) {
      this.groups = Collections.unmodifiableList(Arrays.asList(groups));
      this.groupByCf = new HashMap<>();
      LocalityGroup foundDefault = null;
      for (LocalityGroup group : groups) {
        if (group.isDefaultLocalityGroup && group.columnFamilies == null) {
          if (foundDefault != null) {
            throw new IllegalStateException("Found multiple default locality groups");
          }
          foundDefault = group;
        } else {
          for (Entry<ByteSequence,MutableLong> entry : group.columnFamilies.entrySet()) {
            // only index families with a positive count; zero means not actually present
            if (entry.getValue().longValue() > 0) {
              if (groupByCf.containsKey(entry.getKey())) {
                throw new IllegalStateException("Found the same cf in multiple locality groups");
              }
              groupByCf.put(entry.getKey(), group);
            }
          }
        }
      }
      defaultGroup = foundDefault;
    }
  }
  /**
   * This will cache the arguments used in the seek call along with the locality groups seeked.
   */
  public static class LocalityGroupSeekCache {
    private Set<ByteSequence> lastColumnFamilies;
    private volatile boolean lastInclusive;
    private Collection<LocalityGroup> lastUsed;
    public Set<ByteSequence> getLastColumnFamilies() {
      return lastColumnFamilies;
    }
    public boolean isLastInclusive() {
      return lastInclusive;
    }
    public Collection<LocalityGroup> getLastUsed() {
      return lastUsed;
    }
    public int getNumLGSeeked() {
      return (lastUsed == null ? 0 : lastUsed.size());
    }
  }
  private final LocalityGroupContext lgContext;
  private LocalityGroupSeekCache lgCache;
  private AtomicBoolean interruptFlag;
  public LocalityGroupIterator(LocalityGroup[] groups) {
    super(groups.length);
    this.lgContext = new LocalityGroupContext(groups);
  }
  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    // system iterator: constructed directly, never initialized via the iterator framework
    throw new UnsupportedOperationException();
  }
  /**
   * This is the seek work horse for a HeapIterator with locality groups (uses by the InMemory and
   * RFile mechanisms). This method will find the locality groups to use in the
   * LocalityGroupContext, and will seek those groups.
   *
   * @param hiter The heap iterator
   * @param lgContext The locality groups
   * @param range The range to seek
   * @param columnFamilies The column fams to seek
   * @param inclusive The inclusiveness of the column fams
   * @return The locality groups seeked
   * @throws IOException thrown if an locality group seek fails
   */
  static final Collection<LocalityGroup> _seek(HeapIterator hiter, LocalityGroupContext lgContext,
      Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
    hiter.clear();
    final Set<ByteSequence> cfSet = getCfSet(columnFamilies);
    // determine the set of groups to use
    final Collection<LocalityGroup> groups = getLocalityGroups(lgContext, inclusive, cfSet);
    for (LocalityGroup lgr : groups) {
      // family filtering already happened via group selection, so seek with no family filter
      lgr.getIterator().seek(range, EMPTY_CF_SET, false);
      hiter.addSource(lgr.getIterator());
    }
    return groups;
  }
  // Selects which locality groups could contain data matching the requested families.
  private static Collection<LocalityGroup> getLocalityGroups(LocalityGroupContext lgContext,
      boolean inclusive, Set<ByteSequence> cfSet) {
    final Collection<LocalityGroup> groups;
    // if no column families specified, then include all groups unless !inclusive
    if (cfSet.isEmpty()) {
      groups = inclusive ? List.of() : lgContext.groups;
    } else {
      groups = new HashSet<>();
      // do not know what column families are in the default locality group,
      // only know what column families are not in it
      if (lgContext.defaultGroup != null) {
        if (inclusive) {
          if (!lgContext.groupByCf.keySet().containsAll(cfSet)) {
            // default LG may contain wanted and unwanted column families
            groups.add(lgContext.defaultGroup);
          } // else - everything wanted is in other locality groups, so nothing to do
        } else {
          // must include the default group as it may include cfs not in our cfSet
          groups.add(lgContext.defaultGroup);
        }
      }
      /*
       * Need to consider the following cases for inclusive and exclusive (lgcf:locality group
       * column family set, cf:column family set) lgcf and cf are disjoint lgcf and cf are the same
       * cf contains lgcf lgcf contains cf lgccf and cf intersect but neither is a subset of the
       * other
       */
      if (!inclusive) {
        // exclusive: keep every group owning at least one family outside the excluded set
        lgContext.groupByCf.entrySet().stream().filter(entry -> !cfSet.contains(entry.getKey()))
            .map(Entry::getValue).forEach(groups::add);
      } else if (lgContext.groupByCf.size() <= cfSet.size()) {
        // inclusive, fewer indexed families than requested: iterate the index
        lgContext.groupByCf.entrySet().stream().filter(entry -> cfSet.contains(entry.getKey()))
            .map(Entry::getValue).forEach(groups::add);
      } else {
        // inclusive, fewer requested families than indexed: iterate the request
        cfSet.stream().map(lgContext.groupByCf::get).filter(Objects::nonNull).forEach(groups::add);
      }
    }
    return groups;
  }
  // Normalizes the requested families to a Set, avoiding a copy when possible.
  private static Set<ByteSequence> getCfSet(Collection<ByteSequence> columnFamilies) {
    final Set<ByteSequence> cfSet;
    if (columnFamilies.isEmpty()) {
      cfSet = Collections.emptySet();
    } else {
      if (columnFamilies instanceof Set<?>) {
        cfSet = (Set<ByteSequence>) columnFamilies;
      } else {
        cfSet = Set.copyOf(columnFamilies);
      }
    }
    return cfSet;
  }
  /**
   * This seek method will reuse the supplied LocalityGroupSeekCache if it can. Otherwise it will
   * delegate to the _seek method.
   *
   * @param hiter The heap iterator
   * @param lgContext The locality groups
   * @param range The range to seek
   * @param columnFamilies The column fams to seek
   * @param inclusive The inclusiveness of the column fams
   * @param lgSeekCache A cache returned by the previous call to this method
   * @return A cache for this seek call
   * @throws IOException thrown if an locality group seek fails
   */
  public static LocalityGroupSeekCache seek(HeapIterator hiter, LocalityGroupContext lgContext,
      Range range, Collection<ByteSequence> columnFamilies, boolean inclusive,
      LocalityGroupSeekCache lgSeekCache) throws IOException {
    if (lgSeekCache == null) {
      lgSeekCache = new LocalityGroupSeekCache();
    }
    // determine if the arguments have changed since the last time
    boolean sameArgs = false;
    Set<ByteSequence> cfSet = null;
    if (lgSeekCache.lastUsed != null && inclusive == lgSeekCache.lastInclusive) {
      if (columnFamilies instanceof Set) {
        sameArgs = lgSeekCache.lastColumnFamilies.equals(columnFamilies);
      } else {
        // keep the copy so it can be reused below if the args turn out to differ
        cfSet = Set.copyOf(columnFamilies);
        sameArgs = lgSeekCache.lastColumnFamilies.equals(cfSet);
      }
    }
    // if the column families and inclusiveness have not changed, then we can simply re-seek the
    // locality groups we discovered last round and rebuild the heap.
    if (sameArgs) {
      hiter.clear();
      for (LocalityGroup lgr : lgSeekCache.lastUsed) {
        lgr.getIterator().seek(range, EMPTY_CF_SET, false);
        hiter.addSource(lgr.getIterator());
      }
    } else { // otherwise capture the parameters, and use the static seek method to locate the
             // locality groups to use.
      lgSeekCache.lastColumnFamilies = (cfSet == null ? Set.copyOf(columnFamilies) : cfSet);
      lgSeekCache.lastInclusive = inclusive;
      lgSeekCache.lastUsed = _seek(hiter, lgContext, range, columnFamilies, inclusive);
    }
    return lgSeekCache;
  }
  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    lgCache = seek(this, lgContext, range, columnFamilies, inclusive, lgCache);
  }
  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    LocalityGroup[] groupsCopy = new LocalityGroup[lgContext.groups.size()];
    for (int i = 0; i < lgContext.groups.size(); i++) {
      groupsCopy[i] = new LocalityGroup(lgContext.groups.get(i), env);
      if (interruptFlag != null) {
        // propagate the interrupt flag so copies remain interruptible
        groupsCopy[i].getIterator().setInterruptFlag(interruptFlag);
      }
    }
    return new LocalityGroupIterator(groupsCopy);
  }
  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    this.interruptFlag = flag;
    for (LocalityGroup lgr : lgContext.groups) {
      lgr.getIterator().setInterruptFlag(flag);
    }
  }
}
| 9,539 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/SortedMapIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
* A simple iterator over a Java SortedMap
*
* Note that this class is intended as an in-memory replacement for RFile$Reader, so its behavior
* reflects the same assumptions; namely, that this iterator is not responsible for respecting the
* columnFamilies passed into seek(). If you want a Map-backed Iterator that returns only sought
* CFs, construct a new ColumnFamilySkippingIterator(new SortedMapIterator(map)).
*
* @see ColumnFamilySkippingIterator
*/
public class SortedMapIterator implements InterruptibleIterator {
  // Iterator over the tail of the map starting at the current seek position.
  private Iterator<Entry<Key,Value>> iter;
  // Current top entry; null means exhausted (or past the end of the range).
  private Entry<Key,Value> entry;
  private final SortedMap<Key,Value> map;
  private Range range;

  private AtomicBoolean interruptFlag;
  // Counter so the interrupt flag is only polled every 100 calls to next().
  private int interruptCheckCount = 0;

  @Override
  public SortedMapIterator deepCopy(IteratorEnvironment env) {
    if (env != null && env.isSamplingEnabled()) {
      // this iterator has no sample data to serve
      throw new SampleNotPresentException();
    }
    return new SortedMapIterator(map, interruptFlag);
  }

  private SortedMapIterator(SortedMap<Key,Value> map, AtomicBoolean interruptFlag) {
    this.map = map;
    iter = null;
    this.range = new Range();
    entry = null;

    this.interruptFlag = interruptFlag;
  }

  /** Creates an iterator over {@code map} with no interrupt flag set. */
  public SortedMapIterator(SortedMap<Key,Value> map) {
    this(map, null);
  }

  @Override
  public Key getTopKey() {
    return entry.getKey();
  }

  @Override
  public Value getTopValue() {
    return entry.getValue();
  }

  @Override
  public boolean hasTop() {
    return entry != null;
  }

  /**
   * Pulls the next entry from the underlying iterator into {@code entry}, clearing it when the
   * map is exhausted or the entry falls after the end of the current range.
   */
  private void pullNextEntry() {
    if (iter.hasNext()) {
      entry = iter.next();
      if (range.afterEndKey(entry.getKey())) {
        entry = null;
      }
    } else {
      entry = null;
    }
  }

  @Override
  public void next() throws IOException {
    if (entry == null) {
      throw new IllegalStateException();
    }

    if (interruptFlag != null && interruptCheckCount++ % 100 == 0 && interruptFlag.get()) {
      throw new IterationInterruptedException();
    }

    pullNextEntry();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    if (interruptFlag != null && interruptFlag.get()) {
      throw new IterationInterruptedException();
    }

    this.range = range;

    Key key = range.getStartKey();
    if (key == null) {
      // null start key means begin at the start of the map
      key = new Key();
    }
    iter = map.tailMap(key).entrySet().iterator();
    pullNextEntry();

    // tailMap may position us slightly before the range start; skip forward as needed
    while (hasTop() && range.beforeStartKey(getTopKey())) {
      next();
    }
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    // system iterator: constructed directly, never initialized via the iterator framework
    throw new UnsupportedOperationException();
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    this.interruptFlag = flag;
  }
}
| 9,540 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/StatsIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.ServerWrappingIterator;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
 * A pass-through iterator that counts seeks and entries read, periodically flushing the read
 * count into shared tablet- and server-level counters.
 */
public class StatsIterator extends ServerWrappingIterator {

  // Entries read since the last flush into the shared counters.
  private int numRead = 0;
  private final AtomicLong seekCounter;
  private final AtomicLong scanCounter;
  private final LongAdder serverScanCounter;

  /**
   * @param source iterator to wrap
   * @param seekCounter incremented once per seek
   * @param tabletScanCounter accumulates entries read for this tablet
   * @param serverScanCounter accumulates entries read across the whole server
   */
  public StatsIterator(SortedKeyValueIterator<Key,Value> source, AtomicLong seekCounter,
      AtomicLong tabletScanCounter, LongAdder serverScanCounter) {
    super(source);
    this.seekCounter = seekCounter;
    this.scanCounter = tabletScanCounter;
    this.serverScanCounter = serverScanCounter;
  }

  @Override
  public void next() throws IOException {
    source.next();
    numRead++;
    // flush in batches of 23 rather than on every entry,
    // presumably to limit contention on the shared counters — TODO confirm
    if (numRead % 23 == 0) {
      scanCounter.addAndGet(numRead);
      serverScanCounter.add(numRead);
      numRead = 0;
    }
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new StatsIterator(source.deepCopy(env), seekCounter, scanCounter, serverScanCounter);
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    source.seek(range, columnFamilies, inclusive);
    seekCounter.incrementAndGet();
    // flush any reads accumulated before this seek
    scanCounter.addAndGet(numRead);
    serverScanCounter.add(numRead);
    numRead = 0;
  }

  /** Flushes any unreported reads into the shared counters. */
  public void report() {
    scanCounter.addAndGet(numRead);
    serverScanCounter.add(numRead);
    numRead = 0;
  }
}
| 9,541 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/SystemIteratorUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.tabletserver.thrift.IteratorConfig;
import org.apache.accumulo.core.tabletserver.thrift.TIteratorSetting;
/**
* System utility class. Not for client use.
*/
public class SystemIteratorUtil {

  // Static utility class; not meant to be instantiated.
  private SystemIteratorUtil() {}

  /** Converts a client-side iterator setting to its Thrift representation. */
  public static TIteratorSetting toTIteratorSetting(IteratorSetting is) {
    return new TIteratorSetting(is.getPriority(), is.getName(), is.getIteratorClass(),
        is.getOptions());
  }

  /** Converts a Thrift iterator setting back to the client-side representation. */
  public static IteratorSetting toIteratorSetting(TIteratorSetting tis) {
    return new IteratorSetting(tis.getPriority(), tis.getName(), tis.getIteratorClass(),
        tis.getProperties());
  }

  /** Bundles a list of iterator settings into a single Thrift {@link IteratorConfig}. */
  public static IteratorConfig toIteratorConfig(List<IteratorSetting> iterators) {
    ArrayList<TIteratorSetting> tisList = new ArrayList<>();

    for (IteratorSetting iteratorSetting : iterators) {
      tisList.add(toTIteratorSetting(iteratorSetting));
    }

    return new IteratorConfig(tisList);
  }

  /**
   * Stacks the standard system scan iterators on top of {@code source}: delete handling, column
   * family skipping, column qualifier filtering, and visibility filtering (in that order, bottom
   * to top).
   *
   * @param source the underlying data iterator
   * @param cols columns to filter on; empty set means no qualifier filtering
   * @param auths authorizations used for visibility filtering
   * @param defaultVisibility visibility applied to entries with an empty visibility
   * @param conf configuration consulted for delete-handling behavior
   * @return the top of the assembled iterator stack
   */
  public static SortedKeyValueIterator<Key,Value> setupSystemScanIterators(
      SortedKeyValueIterator<Key,Value> source, Set<Column> cols, Authorizations auths,
      byte[] defaultVisibility, AccumuloConfiguration conf) throws IOException {
    SortedKeyValueIterator<Key,Value> delIter =
        DeletingIterator.wrap(source, false, DeletingIterator.getBehavior(conf));
    ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
    SortedKeyValueIterator<Key,Value> colFilter = ColumnQualifierFilter.wrap(cfsi, cols);
    return VisibilityFilter.wrap(colFilter, auths, defaultVisibility);
  }
}
| 9,542 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/TimeSettingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
 * A pass-through iterator that overwrites the timestamp of every key it returns with a fixed
 * value. Seek ranges are widened at timestamp boundaries so that entries whose real timestamps
 * differ from the fixed one are not missed, then re-filtered against the original range.
 */
public class TimeSettingIterator implements InterruptibleIterator {

  private final SortedKeyValueIterator<Key,Value> source;
  // Timestamp stamped onto every key returned by getTopKey().
  private final long time;
  // Original (unmodified) range from the last seek, used to re-filter results.
  private Range range;

  public TimeSettingIterator(SortedKeyValueIterator<Key,Value> source, long time) {
    this.source = source;
    this.time = time;
  }

  @Override
  public Key getTopKey() {
    // note: mutates the source's top key in place before returning it
    Key key = source.getTopKey();
    key.setTimestamp(time);
    return key;
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    ((InterruptibleIterator) source).setInterruptFlag(flag);
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new TimeSettingIterator(source.deepCopy(env), time);
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    // intentional no-op: the source is supplied via the constructor, not the framework
  }

  @Override
  public boolean hasTop() {
    return source.hasTop() && !range.afterEndKey(getTopKey());
  }

  @Override
  public void next() throws IOException {
    source.next();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    // widen the range's timestamp bounds so entries with differing real timestamps are seen
    Range seekRange = IteratorUtil.maximizeStartKeyTimeStamp(range);
    seekRange = IteratorUtil.minimizeEndKeyTimeStamp(seekRange);
    source.seek(seekRange, columnFamilies, inclusive);
    this.range = range;

    // skip entries the widened seek admitted but the original range excludes
    while (hasTop() && range.beforeStartKey(getTopKey())) {
      next();
    }
  }

  @Override
  public Value getTopValue() {
    return source.getTopValue();
  }
}
| 9,543 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/SourceSwitchingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.YieldCallback;
/**
* A SortedKeyValueIterator which presents a view over some section of data, regardless of whether
* or not it is backed by memory (InMemoryMap) or an RFile (InMemoryMap that was minor compacted to
* a file). Clients reading from a table that has data in memory should not see interruption in
* their scan when that data is minor compacted. This iterator is designed to manage this behind the
* scene.
*/
public class SourceSwitchingIterator implements InterruptibleIterator {

  /**
   * A replaceable backing source. {@link #isCurrent()} reports whether the source is still valid;
   * when it is not, {@link #getNewDataSource()} supplies the replacement.
   */
  public interface DataSource {
    boolean isCurrent();

    DataSource getNewDataSource();

    DataSource getDeepCopyDataSource(IteratorEnvironment env);

    SortedKeyValueIterator<Key,Value> iterator() throws IOException;

    void setInterruptFlag(AtomicBoolean flag);

    default void close(boolean sawErrors) {}
  }

  // Current backing source; replaced via switchSource() when no longer current.
  private DataSource source;
  // Iterator over the current source; recreated on every source switch.
  private SortedKeyValueIterator<Key,Value> iter;

  private Optional<YieldCallback<Key>> yield = Optional.empty();

  // Copy of the current top entry; keeping our own copy allows re-seeking past `key`
  // after the source is switched out from under us.
  private Key key;
  private Value val;

  // Parameters of the last seek, retained so the new source can be re-seeked after a switch.
  private Range range;
  private boolean inclusive;
  private Collection<ByteSequence> columnFamilies;

  // When true (row isolation), only switch sources on row boundaries.
  private boolean onlySwitchAfterRow;

  // Synchronization on copies synchronizes operations across all deep copies of this instance.
  //
  // This implementation assumes that there is one thread reading data (a scan) from all deep copies
  // and that another thread may call switch at any point. A single scan may have multiple deep
  // copies of this iterator if other iterators above this one duplicate their source. For example,
  // if an IntersectingIterator over two columns was configured, `copies` would contain two SSIs
  // instead of just one SSI. The two instances in `copies` would both be at the same "level"
  // in the tree of iterators for the scan. If multiple instances of SSI are configure in the
  // iterator
  // tree (e.g. priority 8 and priority 12), each instance would share their own `copies` e.g.
  // SSI@priority8:copies1[...], SSI@priority12:copies2[...]
  private final List<SourceSwitchingIterator> copies;

  private SourceSwitchingIterator(DataSource source, boolean onlySwitchAfterRow,
      List<SourceSwitchingIterator> copies) {
    this.source = source;
    this.onlySwitchAfterRow = onlySwitchAfterRow;
    this.copies = copies;
    // register with the shared copy list so switchNow() reaches every copy
    copies.add(this);
  }

  public SourceSwitchingIterator(DataSource source, boolean onlySwitchAfterRow) {
    this(source, onlySwitchAfterRow, new ArrayList<>());
  }

  public SourceSwitchingIterator(DataSource source) {
    this(source, false);
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    synchronized (copies) {
      return new SourceSwitchingIterator(source.getDeepCopyDataSource(env), onlySwitchAfterRow,
          copies);
    }
  }

  @Override
  public Key getTopKey() {
    return key;
  }

  @Override
  public Value getTopValue() {
    return val;
  }

  @Override
  public boolean hasTop() {
    return key != null;
  }

  @Override
  public void enableYielding(YieldCallback<Key> yield) {
    this.yield = Optional.of(yield);

    // if we require row isolation, then we cannot support yielding in the middle.
    if (!onlySwitchAfterRow) {
      if (iter != null) {
        iter.enableYielding(yield);
      }
    }
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    // system iterator: constructed directly, never initialized via the iterator framework
    throw new UnsupportedOperationException();
  }

  @Override
  public void next() throws IOException {
    synchronized (copies) {
      readNext(false);
    }
  }

  // Advances to the next entry, handling yields, source switches, and the initial seek.
  // Callers must hold the lock on `copies`.
  private void readNext(boolean initialSeek) throws IOException {

    // we need to check here if we were yielded in case the source was switched out and re-seeked by
    // someone else (minor compaction/InMemoryMap)
    boolean yielded = (yield.isPresent() && yield.orElseThrow().hasYielded());

    // check of initialSeek second is intentional so that it does not short
    // circuit the call to switchSource
    boolean seekNeeded = yielded || (!onlySwitchAfterRow && switchSource()) || initialSeek;

    if (seekNeeded) {
      if (initialSeek) {
        iter.seek(range, columnFamilies, inclusive);
      } else if (yielded) {
        Key yieldPosition = yield.orElseThrow().getPositionAndReset();
        if (!range.contains(yieldPosition)) {
          throw new IOException("Underlying iterator yielded to a position outside of its range: "
              + yieldPosition + " not in " + range);
        }

        iter.seek(new Range(yieldPosition, false, range.getEndKey(), range.isEndKeyInclusive()),
            columnFamilies, inclusive);
      } else {
        // source was switched mid-scan: resume just past the last key we returned
        iter.seek(new Range(key, false, range.getEndKey(), range.isEndKeyInclusive()),
            columnFamilies, inclusive);
      }
    } else {
      iter.next();
      if (onlySwitchAfterRow && iter.hasTop() && !source.isCurrent()
          && !key.getRowData().equals(iter.getTopKey().getRowData())) {
        // row boundary crossed with a stale source: switch now and resume at the next row
        switchSource();
        iter.seek(new Range(key.followingKey(PartialKey.ROW), true, range.getEndKey(),
            range.isEndKeyInclusive()), columnFamilies, inclusive);
      }
    }

    if (iter.hasTop()) {
      if (yield.isPresent() && yield.orElseThrow().hasYielded()) {
        throw new IOException("Coding error: hasTop returned true but has yielded at "
            + yield.orElseThrow().getPositionAndReset());
      }
      Key nextKey = iter.getTopKey();
      Value nextVal = iter.getTopValue();

      try {
        // clone the key: the source may reuse/mutate its top key, and we may need `key`
        // after the source has been switched away
        key = (Key) nextKey.clone();
      } catch (CloneNotSupportedException e) {
        throw new IOException(e);
      }
      val = nextVal;
    } else {
      key = null;
      val = null;
    }
  }

  // Replaces a stale source with a fresh one; returns true if a switch occurred.
  private boolean switchSource() throws IOException {
    if (!source.isCurrent()) {
      source = source.getNewDataSource();
      iter = source.iterator();
      if (!onlySwitchAfterRow && yield.isPresent()) {
        iter.enableYielding(yield.orElseThrow());
      }

      return true;
    }

    return false;
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    synchronized (copies) {
      // remember the seek parameters so a switched-in source can be re-seeked later
      this.range = range;
      this.inclusive = inclusive;
      this.columnFamilies = columnFamilies;

      if (iter == null) {
        iter = source.iterator();
        if (!onlySwitchAfterRow && yield.isPresent()) {
          iter.enableYielding(yield.orElseThrow());
        }
      }

      readNext(true);
    }
  }

  // Switches this single copy; callers must hold the lock on `copies`.
  private void _switchNow() throws IOException {
    if (onlySwitchAfterRow) {
      throw new IllegalStateException("Can only switch on row boundaries");
    }

    if (switchSource()) {
      if (key != null) {
        // re-seek inclusively at the current key so the top entry is preserved
        iter.seek(new Range(key, true, range.getEndKey(), range.isEndKeyInclusive()),
            columnFamilies, inclusive);
      }
    }
  }

  /** Forces an immediate source switch on this iterator and all of its deep copies. */
  public void switchNow() throws IOException {
    synchronized (copies) {
      for (SourceSwitchingIterator ssi : copies) {
        ssi._switchNow();
      }
    }
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    synchronized (copies) {
      if (copies.size() != 1) {
        throw new IllegalStateException(
            "setInterruptFlag() called after deep copies made " + copies.size());
      }

      if (iter != null) {
        ((InterruptibleIterator) iter).setInterruptFlag(flag);
      }

      source.setInterruptFlag(flag);
    }
  }
}
| 9,544 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/InterruptibleIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
* Allows an iterator to be interrupted. Typically, once the interrupt flag is set the iterator will
* throw an {@link InterruptedException} if the interrupt is detected. Some iterators have been
* optimized to not always check the flag.
* <p>
* One example of a system interrupt is when a Tablet is being closed. If a Tablet has an active
* scan and an InterruptibleIterator is configured on that Table, then it will be interrupted when
* the Tablet is closed.
*/
public interface InterruptibleIterator extends SortedKeyValueIterator<Key,Value> {

  /**
   * Sets the flag this iterator should periodically check; once the flag becomes true, iteration
   * is expected to abort (see the class javadoc for details on detection).
   *
   * @param flag shared flag that is set to true when iteration should be interrupted
   */
  void setInterruptFlag(AtomicBoolean flag);
}
| 9,545 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/SampleIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import org.apache.accumulo.core.client.sample.RowSampler;
import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
 * A {@link Filter} that only passes through keys accepted by the configured {@link Sampler}.
 * Values are not consulted when deciding membership.
 */
public class SampleIterator extends Filter {

  // Fix: the field was previously initialized to `new RowSampler()` and then unconditionally
  // overwritten in the constructor — a dead allocation. It is now final with no initializer.
  private final Sampler sampler;

  /**
   * @param iter source iterator to filter
   * @param sampler decides, per key, whether an entry belongs to the sample
   */
  public SampleIterator(SortedKeyValueIterator<Key,Value> iter, Sampler sampler) {
    setSource(iter);
    this.sampler = sampler;
  }

  @Override
  public boolean accept(Key k, Value v) {
    // only the key participates in the sampling decision
    return sampler.accept(k);
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    // the sampler instance is shared between copies
    return new SampleIterator(getSource().deepCopy(env), sampler);
  }
}
| 9,546 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/VisibilityFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.SynchronizedServerFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.VisibilityEvaluator;
import org.apache.accumulo.core.security.VisibilityParseException;
import org.apache.accumulo.core.util.BadArgumentException;
import org.apache.commons.collections4.map.LRUMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A SortedKeyValueIterator that filters based on ColumnVisibility and optimized for use with system
* iterators. Prior to 2.0, this class extended {@link org.apache.accumulo.core.iterators.Filter}
 * and all system iterators were wrapped with a <code>SynchronizedIterator</code> during creation
* of the iterator stack in {@link org.apache.accumulo.core.iterators.IteratorUtil}
* .loadIterators(). For performance reasons, the synchronization was pushed down the stack to this
* class.
*/
public class VisibilityFilter extends SynchronizedServerFilter {
  // evaluates parsed visibility expressions against the scan authorizations
  protected VisibilityEvaluator ve;
  // visibility applied to keys whose own visibility is empty
  protected ByteSequence defaultVisibility;
  // caches evaluation results per visibility expression; parsing and evaluating are the
  // expensive steps this avoids for repeated expressions
  protected LRUMap<ByteSequence,Boolean> cache;
  // retained so deepCopy can construct an equivalent filter
  protected Authorizations authorizations;

  private static final Logger log = LoggerFactory.getLogger(VisibilityFilter.class);

  private VisibilityFilter(SortedKeyValueIterator<Key,Value> iterator,
      Authorizations authorizations, byte[] defaultVisibility) {
    super(iterator);
    this.ve = new VisibilityEvaluator(authorizations);
    this.authorizations = authorizations;
    this.defaultVisibility = new ArrayByteSequence(defaultVisibility);
    this.cache = new LRUMap<>(1000);
  }

  @Override
  public synchronized SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    // the copy gets its own evaluator and cache; only the authorizations/default are shared data
    return new VisibilityFilter(source.deepCopy(env), authorizations, defaultVisibility.toArray());
  }

  @Override
  protected boolean accept(Key k, Value v) {
    ByteSequence testVis = k.getColumnVisibilityData();

    // an empty visibility with no default is visible to everyone
    if (testVis.length() == 0 && defaultVisibility.length() == 0) {
      return true;
    } else if (testVis.length() == 0) {
      // empty visibility falls back to the configured default
      testVis = defaultVisibility;
    }

    // consult the cache before parsing/evaluating the expression
    Boolean b = cache.get(testVis);
    if (b != null) {
      return b;
    }

    try {
      boolean bb = ve.evaluate(new ColumnVisibility(testVis.toArray()));
      cache.put(testVis, bb);
      return bb;
    } catch (VisibilityParseException e) {
      // malformed/unevaluable visibility: log and hide the key rather than fail the scan
      log.error("VisibilityParseException with visibility of Key: {}", k, e);
      return false;
    } catch (BadArgumentException e) {
      log.error("BadArgumentException with visibility of Key: {}", k, e);
      return false;
    }
  }

  /**
   * Optimized filter for the case of no authorizations and no default visibility: only keys with
   * an empty visibility can be seen, so no expression evaluation is required.
   */
  private static class EmptyAuthsVisibilityFilter extends SynchronizedServerFilter {

    public EmptyAuthsVisibilityFilter(SortedKeyValueIterator<Key,Value> source) {
      super(source);
    }

    @Override
    public synchronized SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
      return new EmptyAuthsVisibilityFilter(source.deepCopy(env));
    }

    @Override
    protected boolean accept(Key k, Value v) {
      return k.getColumnVisibilityData().length() == 0;
    }
  }

  /**
   * Wraps the source with the visibility filter implementation appropriate for the given
   * authorizations and default visibility.
   */
  public static SortedKeyValueIterator<Key,Value> wrap(SortedKeyValueIterator<Key,Value> source,
      Authorizations authorizations, byte[] defaultVisibility) {
    if (authorizations.isEmpty() && defaultVisibility.length == 0) {
      return new EmptyAuthsVisibilityFilter(source);
    } else {
      return new VisibilityFilter(source, authorizations, defaultVisibility);
    }
  }
}
| 9,547 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/DeletingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.IteratorUtil;
import org.apache.accumulo.core.iterators.ServerWrappingIterator;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
public class DeletingIterator extends ServerWrappingIterator {
  // when true, delete entries are passed through to the caller; when false they are suppressed
  // along with the entries they delete
  private boolean propagateDeletes;
  // reused key to avoid allocating a new Key for every skipped row/column
  private Key workKey = new Key();

  /**
   * Controls what happens when a delete entry is encountered: PROCESS handles it normally, FAIL
   * throws if a delete is ever seen.
   */
  public enum Behavior {
    PROCESS, FAIL
  }

  @Override
  public DeletingIterator deepCopy(IteratorEnvironment env) {
    return new DeletingIterator(this, env);
  }

  private DeletingIterator(DeletingIterator other, IteratorEnvironment env) {
    super(other.source.deepCopy(env));
    propagateDeletes = other.propagateDeletes;
  }

  private DeletingIterator(SortedKeyValueIterator<Key,Value> iterator, boolean propagateDeletes) {
    super(iterator);
    this.propagateDeletes = propagateDeletes;
  }

  @Override
  public void next() throws IOException {
    // if positioned on a delete, skip everything that delete applies to before advancing
    if (source.getTopKey().isDeleted()) {
      skipRowColumn();
    } else {
      source.next();
    }
    findTop();
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    // do not want to seek to the middle of a row
    Range seekRange = IteratorUtil.maximizeStartKeyTimeStamp(range);

    source.seek(seekRange, columnFamilies, inclusive);
    findTop();

    if (range.getStartKey() != null) {
      // skip entries that fall before the requested start key; these were only read because the
      // seek range's start timestamp was maximized above
      while (source.hasTop() && source.getTopKey().compareTo(range.getStartKey(),
          PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME) < 0) {
        next();
      }

      while (hasTop() && range.beforeStartKey(getTopKey())) {
        next();
      }
    }
  }

  // advance past deletes (and the entries they delete) until a visible entry is on top,
  // unless deletes are being propagated
  private void findTop() throws IOException {
    if (!propagateDeletes) {
      while (source.hasTop() && source.getTopKey().isDeleted()) {
        skipRowColumn();
      }
    }
  }

  // skip all entries matching the current (delete) entry on row/colfam/colqual/colvis;
  // those are the versions the delete applies to
  private void skipRowColumn() throws IOException {
    workKey.set(source.getTopKey());

    Key keyToSkip = workKey;
    source.next();

    while (source.hasTop()
        && source.getTopKey().equals(keyToSkip, PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
      source.next();
    }
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) {
    throw new UnsupportedOperationException();
  }

  /**
   * Wraps the source according to the requested behavior: PROCESS returns a DeletingIterator,
   * FAIL returns a wrapper that throws if a delete entry ever surfaces.
   */
  public static SortedKeyValueIterator<Key,Value> wrap(SortedKeyValueIterator<Key,Value> source,
      boolean propagateDeletes, Behavior behavior) {
    switch (behavior) {
      case PROCESS:
        return new DeletingIterator(source, propagateDeletes);
      case FAIL:
        return new ServerWrappingIterator(source) {
          @Override
          public Key getTopKey() {
            Key top = source.getTopKey();
            if (top.isDeleted()) {
              throw new IllegalStateException("Saw unexpected delete " + top);
            }
            return top;
          }
        };
      default:
        throw new IllegalArgumentException("Unknown behavior " + behavior);
    }
  }

  /** Reads the configured delete behavior from {@link Property#TABLE_DELETE_BEHAVIOR}. */
  public static Behavior getBehavior(AccumuloConfiguration conf) {
    return DeletingIterator.Behavior
        .valueOf(conf.get(Property.TABLE_DELETE_BEHAVIOR).toUpperCase());
  }
}
| 9,548 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/SynchronizedIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
* Wraps a SortedKeyValueIterator so that all of its methods are synchronized. The intent is that
 * user iterators which are multi-threaded may call parent methods concurrently. The
 * SynchronizedIterator aims to reduce the likelihood of unwanted concurrent access.
*/
public class SynchronizedIterator<K extends WritableComparable<?>,V extends Writable>
    implements SortedKeyValueIterator<K,V> {

  private final SortedKeyValueIterator<K,V> source;

  /**
   * Creates a wrapper whose every method synchronizes on this instance before delegating to the
   * given source iterator.
   *
   * @param source iterator to guard with synchronization
   */
  public SynchronizedIterator(SortedKeyValueIterator<K,V> source) {
    this.source = source;
  }

  /** Not supported; this wrapper is constructed directly around an existing iterator. */
  @Override
  public synchronized void init(SortedKeyValueIterator<K,V> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public synchronized void seek(Range range, Collection<ByteSequence> columnFamilies,
      boolean inclusive) throws IOException {
    source.seek(range, columnFamilies, inclusive);
  }

  @Override
  public synchronized boolean hasTop() {
    return source.hasTop();
  }

  @Override
  public synchronized K getTopKey() {
    return source.getTopKey();
  }

  @Override
  public synchronized V getTopValue() {
    return source.getTopValue();
  }

  @Override
  public synchronized void next() throws IOException {
    source.next();
  }

  /** Returns a new synchronized wrapper around a deep copy of the underlying source. */
  @Override
  public synchronized SortedKeyValueIterator<K,V> deepCopy(IteratorEnvironment env) {
    return new SynchronizedIterator<>(source.deepCopy(env));
  }
}
| 9,549 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/ColumnFamilySkippingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.ServerSkippingIterator;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
public class ColumnFamilySkippingIterator extends ServerSkippingIterator
    implements InterruptibleIterator {

  // families to include (inclusive == true) or exclude (inclusive == false); a null or empty
  // set with inclusive == false means no filtering is done
  protected Set<ByteSequence> colFamSet = null;
  // sorted view of colFamSet, maintained only when inclusive; used to find the next requested
  // family to seek to
  protected TreeSet<ByteSequence> sortedColFams = null;

  protected boolean inclusive = false;
  protected Range range;

  public ColumnFamilySkippingIterator(SortedKeyValueIterator<Key,Value> source) {
    super(source);
  }

  protected ColumnFamilySkippingIterator(SortedKeyValueIterator<Key,Value> source,
      Set<ByteSequence> colFamSet, boolean inclusive) {
    this(source);
    this.colFamSet = colFamSet;
    this.inclusive = inclusive;
  }

  @Override
  protected void consume() throws IOException {
    int count = 0;

    if (inclusive) {
      // advance until the top key's family is one of the requested families
      while (source.hasTop() && !colFamSet.contains(source.getTopKey().getColumnFamilyData())) {
        if (count < 10) {
          // it is quicker to call next if we are close, but we never know if we are close
          // so give next a try a few times
          source.next();
          count++;
        } else {
          ByteSequence higherCF = sortedColFams.higher(source.getTopKey().getColumnFamilyData());
          if (higherCF == null) {
            // seek to the next row
            reseek(source.getTopKey().followingKey(PartialKey.ROW));
          } else {
            // seek to the next column family in the sorted list of column families
            reseek(new Key(source.getTopKey().getRowData().toArray(), higherCF.toArray(),
                new byte[0], new byte[0], Long.MAX_VALUE));
          }

          count = 0;
        }
      }
    } else if (colFamSet != null && !colFamSet.isEmpty()) {
      // exclusive mode: advance while the top key's family is one of the excluded families
      while (source.hasTop() && colFamSet.contains(source.getTopKey().getColumnFamilyData())) {
        if (count < 10) {
          source.next();
          count++;
        } else {
          // seek to the next column family in the data
          reseek(source.getTopKey().followingKey(PartialKey.ROW_COLFAM));
          count = 0;
        }
      }
    }
  }

  // seek the source forward to key, clamping the new range to the current range's end
  private void reseek(Key key) throws IOException {
    if (range.afterEndKey(key)) {
      // target is past the end of the range; seek to the end key so the source ends up exhausted
      range = new Range(range.getEndKey(), true, range.getEndKey(), range.isEndKeyInclusive());
      source.seek(range, colFamSet, inclusive);
    } else {
      range = new Range(key, true, range.getEndKey(), range.isEndKeyInclusive());
      source.seek(range, colFamSet, inclusive);
    }
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new ColumnFamilySkippingIterator(source.deepCopy(env), colFamSet, inclusive);
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    // avoid copying when the caller already passed a Set
    if (columnFamilies instanceof Set<?>) {
      colFamSet = (Set<ByteSequence>) columnFamilies;
    } else {
      colFamSet = new HashSet<>();
      colFamSet.addAll(columnFamilies);
    }

    if (inclusive) {
      sortedColFams = new TreeSet<>(colFamSet);
    } else {
      sortedColFams = null;
    }

    this.range = range;
    this.inclusive = inclusive;
    super.seek(range, colFamSet, inclusive);
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    ((InterruptibleIterator) source).setInterruptFlag(flag);
  }
}
| 9,550 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/CountingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Map;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.WrappingIterator;
public class CountingIterator extends WrappingIterator {

  // number of times next() has been invoked on this instance
  private long count;

  /** Wraps the given source and starts the counter at zero. */
  public CountingIterator(SortedKeyValueIterator<Key,Value> source) {
    setSource(source);
    count = 0;
  }

  // deep copies get their own counter, reset to zero
  private CountingIterator(CountingIterator other, IteratorEnvironment env) {
    setSource(other.getSource().deepCopy(env));
    count = 0;
  }

  @Override
  public CountingIterator deepCopy(IteratorEnvironment env) {
    return new CountingIterator(this, env);
  }

  /** Not supported; construct directly around an existing source iterator. */
  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void next() throws IOException {
    super.next();
    count += 1;
  }

  /** Returns how many times {@link #next()} has been called. */
  public long getCount() {
    return count;
  }
}
| 9,551 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/MultiIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
* An iterator capable of iterating over other iterators in sorted order.
*/
public class MultiIterator extends HeapIterator {

  private List<SortedKeyValueIterator<Key,Value>> iters;
  // optional seek fence; when set, every seek range is clipped to it
  private Range fence;

  // deep copy with no seek/scan state
  @Override
  public MultiIterator deepCopy(IteratorEnvironment env) {
    return new MultiIterator(this, env);
  }

  private MultiIterator(MultiIterator other, IteratorEnvironment env) {
    super(other.iters.size());
    this.iters = new ArrayList<>();
    this.fence = other.fence;
    for (SortedKeyValueIterator<Key,Value> iter : other.iters) {
      iters.add(iter.deepCopy(env));
    }
  }

  // register every source with the heap without seeking them first
  private void init() {
    for (SortedKeyValueIterator<Key,Value> skvi : iters) {
      addSource(skvi);
    }
  }

  private MultiIterator(List<SortedKeyValueIterator<Key,Value>> iters, Range seekFence,
      boolean init) {
    super(iters.size());

    if (seekFence != null && init) {
      // throw this exception because multi-iterator does not seek on init, therefore the
      // fence would not be enforced in anyway, so do not want to give the impression it
      // will enforce this
      throw new IllegalArgumentException("Initializing not supported when seek fence set");
    }

    this.fence = seekFence;
    this.iters = iters;

    if (init) {
      init();
    }
  }

  public MultiIterator(List<SortedKeyValueIterator<Key,Value>> iters, Range seekFence) {
    this(iters, seekFence, false);
  }

  // fence derived from the tablet extent: (prevEndRow, endRow]
  public MultiIterator(List<SortedKeyValueIterator<Key,Value>> iters2, KeyExtent extent) {
    this(iters2, new Range(extent.prevEndRow(), false, extent.endRow(), true), false);
  }

  public MultiIterator(List<SortedKeyValueIterator<Key,Value>> readers, boolean init) {
    this(readers, null, init);
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    clear();

    if (fence != null) {
      // clip the requested range to the fence; a null result means no overlap, so leave the
      // heap empty (hasTop() will be false)
      range = fence.clip(range, true);
      if (range == null) {
        return;
      }
    }

    for (SortedKeyValueIterator<Key,Value> skvi : iters) {
      skvi.seek(range, columnFamilies, inclusive);
      addSource(skvi);
    }
  }

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    throw new UnsupportedOperationException();
  }
}
| 9,552 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/ColumnQualifierFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.ServerFilter;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
public class ColumnQualifierFilter extends ServerFilter {
  // families for which every qualifier passes (fields are now final: they are assigned once in
  // the constructors and never reassigned)
  private final HashSet<ByteSequence> columnFamilies;
  // maps column qualifier -> set of families that qualifier was requested with
  private final HashMap<ByteSequence,HashSet<ByteSequence>> columnsQualifiers;

  private ColumnQualifierFilter(SortedKeyValueIterator<Key,Value> iterator, Set<Column> columns) {
    super(iterator);
    this.columnFamilies = new HashSet<>();
    this.columnsQualifiers = new HashMap<>();

    columns.forEach(col -> {
      if (col.columnQualifier != null) {
        this.columnsQualifiers
            .computeIfAbsent(new ArrayByteSequence(col.columnQualifier), k -> new HashSet<>())
            .add(new ArrayByteSequence(col.columnFamily));
      } else {
        // this whole column family should pass
        columnFamilies.add(new ArrayByteSequence(col.columnFamily));
      }
    });
  }

  // copy constructor used by deepCopy; shares the (effectively immutable) lookup structures
  private ColumnQualifierFilter(SortedKeyValueIterator<Key,Value> iterator,
      HashSet<ByteSequence> columnFamilies,
      HashMap<ByteSequence,HashSet<ByteSequence>> columnsQualifiers) {
    super(iterator);
    this.columnFamilies = columnFamilies;
    this.columnsQualifiers = columnsQualifiers;
  }

  @Override
  public boolean accept(Key key, Value v) {
    if (columnFamilies.contains(key.getColumnFamilyData())) {
      return true;
    }

    HashSet<ByteSequence> cfset = columnsQualifiers.get(key.getColumnQualifierData());
    // ensure the column qualifier goes with a paired column family,
    // it is possible that a column qualifier could occur with a
    // column family it was not paired with
    return cfset != null && cfset.contains(key.getColumnFamilyData());
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new ColumnQualifierFilter(source.deepCopy(env), columnFamilies, columnsQualifiers);
  }

  /**
   * Wraps the source with a qualifier filter only if at least one column constrains the
   * qualifier; otherwise the source is returned unchanged.
   */
  public static SortedKeyValueIterator<Key,Value> wrap(SortedKeyValueIterator<Key,Value> source,
      Set<Column> cols) {
    // replaces the previous manual boolean-flag loop with the equivalent short-circuiting stream
    if (cols.stream().anyMatch(col -> col.getColumnQualifier() != null)) {
      return new ColumnQualifierFilter(source, cols);
    } else {
      return source;
    }
  }
}
| 9,553 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/IterationInterruptedException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
/**
* Exception thrown if an interrupt flag is detected.
*/
public class IterationInterruptedException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  public IterationInterruptedException() {}

  /**
   * @param msg detail message describing where/why the interrupt was detected
   */
  public IterationInterruptedException(String msg) {
    super(msg);
  }

  /**
   * Added so callers can preserve the underlying cause (e.g. a wrapped exception from the
   * interrupted source) instead of dropping it.
   *
   * @param msg detail message
   * @param cause underlying cause of the interruption
   */
  public IterationInterruptedException(String msg, Throwable cause) {
    super(msg, cause);
  }
}
| 9,554 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/HeapIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.PriorityQueue;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
 * Constructs a {@link PriorityQueue} of multiple SortedKeyValueIterators. Provides a simple way to
 * interact with multiple SortedKeyValueIterators in sorted order.
 *
 * <p>
 * As an optimization, the iterator currently positioned at the overall smallest key is held in
 * {@code topIdx} rather than in the heap, so the common case of repeatedly advancing the same
 * source avoids heap percolation.
 */
public abstract class HeapIterator implements SortedKeyValueIterator<Key,Value> {

  // Ordered by each source's current top key. The source with the overall smallest top key is
  // held in topIdx, NOT in the heap.
  private PriorityQueue<SortedKeyValueIterator<Key,Value>> heap;

  // The source currently positioned at the smallest key; null means this iterator has no top.
  private SortedKeyValueIterator<Key,Value> topIdx = null;

  // Cached copy of heap.peek().getTopKey(); null when topIdx is the only source with a top.
  private Key nextKey;

  // Subclass must call createHeap() before adding sources when using this constructor.
  protected HeapIterator() {
    heap = null;
  }

  protected HeapIterator(int maxSize) {
    createHeap(maxSize);
  }

  /**
   * Initializes the underlying priority queue.
   *
   * @param maxSize initial capacity hint; 0 is mapped to 1 because PriorityQueue requires a
   *        capacity of at least 1
   * @throws IllegalStateException if the heap was already created
   */
  protected void createHeap(int maxSize) {
    if (heap != null) {
      throw new IllegalStateException("heap already exist");
    }
    heap = new PriorityQueue<>(maxSize == 0 ? 1 : maxSize,
        (si1, si2) -> si1.getTopKey().compareTo(si2.getTopKey()));
  }

  @Override
  public final Key getTopKey() {
    return topIdx.getTopKey();
  }

  @Override
  public final Value getTopValue() {
    return topIdx.getTopValue();
  }

  @Override
  public final boolean hasTop() {
    return topIdx != null;
  }

  @Override
  public final void next() throws IOException {
    if (topIdx == null) {
      throw new IllegalStateException("Called next() when there is no top");
    }
    topIdx.next();
    if (topIdx.hasTop()) {
      if (nextKey == null) {
        // topIdx is the only iterator
        return;
      }
      // On a tie (nextKey == topIdx key) the current top is kept, avoiding heap churn.
      if (nextKey.compareTo(topIdx.getTopKey()) < 0) {
        // Grab the next top iterator and put the current top iterator back on the heap
        // This updating of references is special-cased to save on percolation on edge cases
        // since the current top is guaranteed to not be the minimum
        SortedKeyValueIterator<Key,Value> nextTopIdx = heap.remove();
        heap.add(topIdx);
        topIdx = nextTopIdx;
        nextKey = heap.peek().getTopKey();
      }
    } else {
      if (nextKey == null) {
        // No iterators left
        topIdx = null;
        return;
      }
      // Current source is exhausted; promote the smallest remaining source from the heap.
      pullReferencesFromHeap();
    }
  }

  // Pops the heap's minimum into topIdx and refreshes the cached nextKey.
  private void pullReferencesFromHeap() {
    topIdx = heap.remove();
    if (heap.isEmpty()) {
      nextKey = null;
    } else {
      nextKey = heap.peek().getTopKey();
    }
  }

  // Resets to the empty state; safe to call addSource() afterwards.
  protected final void clear() {
    heap.clear();
    topIdx = null;
    nextKey = null;
  }

  /**
   * Adds a source to the heap. Sources without a top are silently ignored. The current top is
   * pushed back into the heap first so the overall minimum is recomputed across all sources.
   */
  protected final void addSource(SortedKeyValueIterator<Key,Value> source) {
    if (source.hasTop()) {
      heap.add(source);
      if (topIdx != null) {
        heap.add(topIdx);
      }
      pullReferencesFromHeap();
    }
  }
}
| 9,555 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/system/EmptyIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.system;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
 * An iterator that never has any data. Stateless, so the shared {@link #EMPTY_ITERATOR} instance
 * can be reused everywhere.
 */
public class EmptyIterator implements InterruptibleIterator {

  /** Shared instance; this iterator holds no state, so a single copy suffices. */
  public static final EmptyIterator EMPTY_ITERATOR = new EmptyIterator();

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
      IteratorEnvironment env) throws IOException {
    // nothing to initialize
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
      throws IOException {
    // no data to seek over
  }

  @Override
  public void setInterruptFlag(AtomicBoolean flag) {
    // interruption is irrelevant for an iterator that never returns data
  }

  @Override
  public boolean hasTop() {
    return false;
  }

  @Override
  public Key getTopKey() {
    // unreachable in correct usage since hasTop() is always false
    throw new UnsupportedOperationException();
  }

  @Override
  public Value getTopValue() {
    // unreachable in correct usage since hasTop() is always false
    throw new UnsupportedOperationException();
  }

  @Override
  public void next() throws IOException {
    // unreachable in correct usage since hasTop() is always false
    throw new UnsupportedOperationException();
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    // stateless, so the shared instance serves as its own deep copy
    return EMPTY_ITERATOR;
  }
}
| 9,556 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/conf/ColumnSet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.conf;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.iteratorsImpl.conf.ColumnUtil.ColFamHashKey;
import org.apache.accumulo.core.iteratorsImpl.conf.ColumnUtil.ColHashKey;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.io.Text;
/**
 * A set of columns, matchable either by column family alone or by family plus qualifier. Columns
 * can be round-tripped through a percent-hex string encoding via {@link #encodeColumns(Text, Text)}
 * and {@link #decodeColumns(String)}. Not safe for concurrent use: lookups mutate shared probe
 * objects.
 */
public class ColumnSet {

  private final Set<ColFamHashKey> objectsCF;
  private final Set<ColHashKey> objectsCol;

  // Reusable lookup probes; contains() repositions these instead of allocating per call.
  private final ColHashKey lookupCol = new ColHashKey();
  private final ColFamHashKey lookupCF = new ColFamHashKey();

  public ColumnSet() {
    objectsCF = new HashSet<>();
    objectsCol = new HashSet<>();
  }

  /**
   * Builds a set from encoded column strings, as produced by {@link #encodeColumns(Text, Text)}.
   */
  public ColumnSet(Collection<String> objectStrings) {
    this();
    for (String encoded : objectStrings) {
      Pair<Text,Text> column = ColumnSet.decodeColumns(encoded);
      Text family = column.getFirst();
      Text qualifier = column.getSecond();
      if (qualifier == null) {
        add(family);
      } else {
        add(family, qualifier);
      }
    }
  }

  protected void add(Text colf) {
    objectsCF.add(new ColFamHashKey(new Text(colf)));
  }

  protected void add(Text colf, Text colq) {
    objectsCol.add(new ColHashKey(colf, colq));
  }

  /** Returns true if the key's column matches a family+qualifier entry or a family-only entry. */
  public boolean contains(Key key) {
    // First try the more specific match: family plus qualifier.
    if (!objectsCol.isEmpty()) {
      lookupCol.set(key);
      if (objectsCol.contains(lookupCol)) {
        return true;
      }
    }
    // Fall back to a family-only match.
    if (objectsCF.isEmpty()) {
      return false;
    }
    lookupCF.set(key);
    return objectsCF.contains(lookupCF);
  }

  public boolean isEmpty() {
    return objectsCol.isEmpty() && objectsCF.isEmpty();
  }

  /**
   * Encodes a column as "family" or "family:qualifier", percent-hex escaping any byte outside
   * [a-zA-Z0-9_-].
   */
  public static String encodeColumns(Text columnFamily, Text columnQualifier) {
    StringBuilder encoded = new StringBuilder();
    encode(encoded, columnFamily);
    if (columnQualifier != null) {
      encoded.append(':');
      encode(encoded, columnQualifier);
    }
    return encoded.toString();
  }

  static void encode(StringBuilder sb, Text t) {
    byte[] bytes = t.getBytes();
    int len = t.getLength();
    for (int i = 0; i < len; i++) {
      int b = bytes[i] & 0xff;
      boolean passThrough = (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z')
          || (b >= '0' && b <= '9') || b == '_' || b == '-';
      if (passThrough) {
        sb.append((char) b);
      } else {
        // Escape everything else as '%' followed by two hex digits.
        sb.append('%').append(String.format("%02x", b));
      }
    }
  }

  /** Returns true if every character is one an encoder could have emitted. */
  public static boolean isValidEncoding(String enc) {
    for (int i = 0; i < enc.length(); i++) {
      char c = enc.charAt(i);
      boolean validChar = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')
          || c == '_' || c == '-' || c == ':' || c == '%';
      if (!validChar) {
        return false;
      }
    }
    return true;
  }

  /**
   * Decodes an encoded column into a (family, qualifier) pair; the qualifier is null when the
   * input had no ':' separator.
   *
   * @throws IllegalArgumentException if the input contains invalid characters or more than one ':'
   */
  public static Pair<Text,Text> decodeColumns(String columns) {
    if (!isValidEncoding(columns)) {
      throw new IllegalArgumentException("Invalid encoding " + columns);
    }
    String[] parts = columns.split(":");
    switch (parts.length) {
      case 1:
        return new Pair<>(decode(parts[0]), null);
      case 2:
        return new Pair<>(decode(parts[0]), decode(parts[1]));
      default:
        throw new IllegalArgumentException(columns);
    }
  }

  static Text decode(String s) {
    Text decoded = new Text();
    byte[] raw = s.getBytes(UTF_8);
    int i = 0;
    while (i < raw.length) {
      if (raw[i] == '%') {
        // A '%' must be followed by exactly two hex digits.
        if (i + 2 >= raw.length) {
          throw new IllegalArgumentException("Invalid characters in encoded string (" + s + ")."
              + " Expected two characters after '%'");
        }
        String hs = new String(new byte[] {raw[i + 1], raw[i + 2]}, UTF_8);
        decoded.append(new byte[] {(byte) Integer.parseInt(hs, 16)}, 0, 1);
        i += 3;
      } else {
        decoded.append(new byte[] {raw[i]}, 0, 1);
        i++;
      }
    }
    return decoded;
  }
}
| 9,557 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/conf/ColumnToClassMapping.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.conf;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.iteratorsImpl.conf.ColumnUtil.ColFamHashKey;
import org.apache.accumulo.core.iteratorsImpl.conf.ColumnUtil.ColHashKey;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.io.Text;
/**
 * Maps columns to object instances, matchable either by column family alone or by family plus
 * qualifier. Not safe for concurrent use: lookups mutate shared probe objects.
 *
 * @param <K> type of the mapped objects
 */
public class ColumnToClassMapping<K> {

  private final HashMap<ColFamHashKey,K> objectsCF;
  private final HashMap<ColHashKey,K> objectsCol;

  // Reusable lookup probes; getObject() repositions these instead of allocating per call.
  private final ColHashKey lookupCol = new ColHashKey();
  private final ColFamHashKey lookupCF = new ColFamHashKey();

  public ColumnToClassMapping() {
    objectsCF = new HashMap<>();
    objectsCol = new HashMap<>();
  }

  public ColumnToClassMapping(Map<String,String> objectStrings, Class<? extends K> c)
      throws ReflectiveOperationException, IOException {
    this(objectStrings, c, null);
  }

  /**
   * Builds a mapping from encoded-column to class-name entries, instantiating each named class
   * via its no-arg constructor.
   *
   * @param objectStrings map from encoded column (see {@link ColumnSet#decodeColumns(String)}) to
   *        fully qualified class name
   * @param c supertype each loaded class must be assignable to
   * @param context class loader context name, may be null
   */
  public ColumnToClassMapping(Map<String,String> objectStrings, Class<? extends K> c,
      String context) throws ReflectiveOperationException, IOException {
    this();
    for (Entry<String,String> entry : objectStrings.entrySet()) {
      Pair<Text,Text> column = ColumnSet.decodeColumns(entry.getKey());
      Class<? extends K> clazz = ClassLoaderUtil.loadClass(context, entry.getValue(), c);
      K instance = clazz.getDeclaredConstructor().newInstance();
      Text qualifier = column.getSecond();
      if (qualifier == null) {
        addObject(column.getFirst(), instance);
      } else {
        addObject(column.getFirst(), qualifier, instance);
      }
    }
  }

  protected void addObject(Text colf, K obj) {
    objectsCF.put(new ColFamHashKey(new Text(colf)), obj);
  }

  protected void addObject(Text colf, Text colq, K obj) {
    objectsCol.put(new ColHashKey(colf, colq), obj);
  }

  /**
   * Returns the object mapped to the key's column, preferring a family+qualifier match over a
   * family-only match, or null when neither matches.
   */
  public K getObject(Key key) {
    if (!objectsCol.isEmpty()) {
      lookupCol.set(key);
      K match = objectsCol.get(lookupCol);
      if (match != null) {
        return match;
      }
    }
    if (objectsCF.isEmpty()) {
      return null;
    }
    lookupCF.set(key);
    return objectsCF.get(lookupCF);
  }

  public boolean isEmpty() {
    return objectsCol.isEmpty() && objectsCF.isEmpty();
  }
}
| 9,558 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/iteratorsImpl/conf/ColumnUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.iteratorsImpl.conf;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.hadoop.io.Text;
/**
 * Hash-key helpers for column lookups. Both key types operate in two modes: built from
 * {@link Text} objects (stored map/set entries) or repositioned over a {@link Key} via
 * {@code set(Key)} (reusable lookup probes). Their {@code equals} compares the Text side of one
 * instance against the Key side of the other, so equality is only meaningful between one
 * Text-based and one Key-based instance. Both modes hash the same underlying bytes, so mixed
 * lookups hash consistently.
 */
public class ColumnUtil {
  // 31-based polynomial hash over a byte range; seeded with 1.
  private static int hash(byte[] bytes, int offset, int len) {
    int hash = 1;
    int end = offset + len;
    for (int i = offset; i < end; i++) {
      hash = (31 * hash) + bytes[i];
    }
    return hash;
  }

  private static int hash(ByteSequence bs) {
    return hash(bs.getBackingArray(), bs.offset(), bs.length());
  }

  /** Hash key over a column family only. See the class comment for the two-mode equals contract. */
  public static class ColFamHashKey {
    Text columnFamily;
    Key key;

    // Precomputed on construction/set so repeated hashCode() calls are cheap.
    private int hashCode;

    ColFamHashKey() {
      columnFamily = null;
    }

    ColFamHashKey(Text cf) {
      columnFamily = cf;
      hashCode = hash(columnFamily.getBytes(), 0, columnFamily.getLength());
    }

    // Repositions this probe over the key's column family; avoids per-lookup allocation.
    void set(Key key) {
      this.key = key;
      hashCode = hash(key.getColumnFamilyData());
    }

    @Override
    public int hashCode() {
      return hashCode;
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof ColFamHashKey) {
        return equals((ColFamHashKey) o);
      }
      return false;
    }

    public boolean equals(ColFamHashKey ohk) {
      if (columnFamily == null) {
        // this is Key-based, so the other side must be Text-based
        return key.compareColumnFamily(ohk.columnFamily) == 0;
      }
      // this is Text-based, so the other side must be Key-based
      return ohk.key.compareColumnFamily(columnFamily) == 0;
    }
  }

  /**
   * Hash key over a column family plus qualifier. See the class comment for the two-mode equals
   * contract.
   */
  public static class ColHashKey {
    Text columnFamily;
    Text columnQualifier;
    Key key;

    // Precomputed on construction/set so repeated hashCode() calls are cheap.
    private int hashValue;

    ColHashKey() {
      columnFamily = null;
      columnQualifier = null;
    }

    ColHashKey(Text cf, Text cq) {
      columnFamily = cf;
      columnQualifier = cq;
      hashValue = hash(columnFamily.getBytes(), 0, columnFamily.getLength())
          + hash(columnQualifier.getBytes(), 0, columnQualifier.getLength());
    }

    // Repositions this probe over the key's family and qualifier; avoids per-lookup allocation.
    void set(Key key) {
      this.key = key;
      hashValue = hash(key.getColumnFamilyData()) + hash(key.getColumnQualifierData());
    }

    @Override
    public int hashCode() {
      return hashValue;
    }

    @Override
    public boolean equals(Object o) {
      if (o instanceof ColHashKey) {
        return equals((ColHashKey) o);
      }
      return false;
    }

    public boolean equals(ColHashKey ohk) {
      if (columnFamily == null) {
        // this is Key-based, so the other side must be Text-based
        return key.compareColumnFamily(ohk.columnFamily) == 0
            && key.compareColumnQualifier(ohk.columnQualifier) == 0;
      }
      // this is Text-based, so the other side must be Key-based
      return ohk.key.compareColumnFamily(columnFamily) == 0
          && ohk.key.compareColumnQualifier(columnQualifier) == 0;
    }
  }
}
| 9,559 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/trace/TraceWrappedRunnable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.trace;
import java.util.Objects;
import org.apache.accumulo.core.util.threads.ThreadPools;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
/**
 * Wraps a {@link Runnable} for {@link ThreadPools} so the tracing {@link Context} captured at
 * construction is restored while the task runs, while still providing access to the wrapped
 * {@link Runnable} instance. This supersedes the use of {@link Context#wrap(Runnable)}.
 */
class TraceWrappedRunnable implements Runnable {

  // Context captured when this wrapper was created; made current for the duration of run().
  private final Context context;
  // The innermost, non-wrapped task.
  private final Runnable unwrapped;

  /** Strips any number of TraceWrappedRunnable layers and returns the innermost task. */
  static Runnable unwrapFully(Runnable r) {
    Runnable task = r;
    while (task instanceof TraceWrappedRunnable) {
      task = ((TraceWrappedRunnable) task).unwrapped;
    }
    return task;
  }

  TraceWrappedRunnable(Runnable other) {
    this.context = Context.current();
    // Never wrap a wrapper; always hold the innermost task directly.
    this.unwrapped = unwrapFully(other);
  }

  @Override
  public void run() {
    try (Scope unused = context.makeCurrent()) {
      unwrapped.run();
    }
  }

  // Equality and hashing delegate to the unwrapped task so wrapped and unwrapped instances can
  // be matched in pool bookkeeping.
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    return obj instanceof TraceWrappedRunnable
        && Objects.equals(unwrapped, ((TraceWrappedRunnable) obj).unwrapped);
  }

  @Override
  public int hashCode() {
    return unwrapped.hashCode();
  }
}
| 9,560 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/trace/TraceWrappedCallable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.trace;
import java.util.Objects;
import java.util.concurrent.Callable;
import org.apache.accumulo.core.util.threads.ThreadPools;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
/**
 * Wraps a {@link Callable} for {@link ThreadPools} so the tracing {@link Context} captured at
 * construction is restored while the task runs, while still providing access to the wrapped
 * {@link Callable} instance. This supersedes the use of {@link Context#wrap(Callable)}.
 */
class TraceWrappedCallable<V> implements Callable<V> {

  // Context captured when this wrapper was created; made current for the duration of call().
  private final Context context;
  // The innermost, non-wrapped task.
  private final Callable<V> unwrapped;

  /** Strips any number of TraceWrappedCallable layers and returns the innermost task. */
  static <C> Callable<C> unwrapFully(Callable<C> c) {
    Callable<C> task = c;
    while (task instanceof TraceWrappedCallable) {
      task = ((TraceWrappedCallable<C>) task).unwrapped;
    }
    return task;
  }

  TraceWrappedCallable(Callable<V> other) {
    this.context = Context.current();
    // Never wrap a wrapper; always hold the innermost task directly.
    this.unwrapped = unwrapFully(other);
  }

  @Override
  public V call() throws Exception {
    try (Scope unused = context.makeCurrent()) {
      return unwrapped.call();
    }
  }

  // Equality and hashing delegate to the unwrapped task so wrapped and unwrapped instances can
  // be matched in pool bookkeeping.
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    return obj instanceof TraceWrappedCallable
        && Objects.equals(unwrapped, ((TraceWrappedCallable<?>) obj).unwrapped);
  }

  @Override
  public int hashCode() {
    return unwrapped.hashCode();
  }
}
| 9,561 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/trace/TraceUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.trace;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;
import java.util.Map;
import java.util.concurrent.Callable;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.thrift.TInfo;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.SpanBuilder;
import io.opentelemetry.api.trace.SpanKind;
import io.opentelemetry.api.trace.StatusCode;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.Scope;
import io.opentelemetry.context.propagation.TextMapGetter;
import io.opentelemetry.semconv.trace.attributes.SemanticAttributes;
/**
 * Static helpers for creating OpenTelemetry spans, propagating trace context over Thrift, and
 * wrapping tasks and RPC service implementations so tracing context follows the work.
 */
public class TraceUtil {

  public static final Logger LOG = LoggerFactory.getLogger(TraceUtil.class);

  // Span names are rendered as "CallerSimpleClassName::spanName".
  private static final String SPAN_FORMAT = "%s::%s";

  // Written once by initializeTracer() at startup; volatile so span creation on any thread sees
  // the latest value.
  private static volatile boolean enabled = true;

  /** Reads the tracing-enabled flag from configuration and logs the resulting tracing state. */
  public static void initializeTracer(AccumuloConfiguration conf) {
    enabled = conf.getBoolean(Property.GENERAL_OPENTELEMETRY_ENABLED);
    logTracingState();
  }

  private static void logTracingState() {
    var msg = "Trace enabled in Accumulo: {}, OpenTelemetry instance: {}, Tracer instance: {}";
    var enabledInAccumulo = enabled ? "yes" : "no";
    var openTelemetry = getOpenTelemetry();
    var tracer = getTracer(openTelemetry);
    LOG.info(msg, enabledInAccumulo, openTelemetry.getClass(), tracer.getClass());
  }

  // Returns the no-op implementation when tracing is disabled, keeping span creation cheap.
  private static OpenTelemetry getOpenTelemetry() {
    return enabled ? GlobalOpenTelemetry.get() : OpenTelemetry.noop();
  }

  private static Tracer getTracer(OpenTelemetry ot) {
    return ot.getTracer(Constants.APPNAME, Constants.VERSION);
  }

  public static Span startSpan(Class<?> caller, String spanName) {
    return startSpan(caller, spanName, null, null, null);
  }

  public static Span startSpan(Class<?> caller, String spanName, Map<String,String> attributes) {
    return startSpan(caller, spanName, null, attributes, null);
  }

  public static Span startClientRpcSpan(Class<?> caller, String spanName) {
    return startSpan(caller, spanName, SpanKind.CLIENT, null, null);
  }

  public static Span startFateSpan(Class<?> caller, String spanName, TInfo tinfo) {
    return startSpan(caller, spanName, null, null, tinfo);
  }

  public static Span startServerRpcSpan(Class<?> caller, String spanName, TInfo tinfo) {
    return startSpan(caller, spanName, SpanKind.SERVER, null, tinfo);
  }

  /**
   * Creates and starts a span named "Caller::spanName".
   *
   * @param caller class whose simple name prefixes the span name
   * @param spanName suffix of the span name
   * @param kind span kind, or null for the builder default
   * @param attributes string attributes to set on the span, or null for none
   * @param tinfo remote parent context from Thrift, or null for the current context
   * @return the started span, or the invalid span when tracing is disabled and no valid span is
   *         already current
   */
  private static Span startSpan(Class<?> caller, String spanName, SpanKind kind,
      Map<String,String> attributes, TInfo tinfo) {
    // If tracing is off and nothing upstream created a valid span, skip building a real span.
    if (!enabled && !Span.current().getSpanContext().isValid()) {
      return Span.getInvalid();
    }
    final String name = String.format(SPAN_FORMAT, caller.getSimpleName(), spanName);
    final SpanBuilder builder = getTracer(getOpenTelemetry()).spanBuilder(name);
    if (kind != null) {
      builder.setSpanKind(kind);
    }
    if (attributes != null) {
      attributes.forEach(builder::setAttribute);
    }
    if (tinfo != null) {
      builder.setParent(getContext(tinfo));
    }
    return builder.startSpan();
  }

  /**
   * Record that an Exception occurred in the code covered by a Span
   *
   * @param span the span
   * @param e the exception
   * @param rethrown whether the exception is subsequently re-thrown
   */
  public static void setException(Span span, Throwable e, boolean rethrown) {
    if (enabled) {
      span.setStatus(StatusCode.ERROR);
      span.recordException(e,
          Attributes.builder().put(SemanticAttributes.EXCEPTION_TYPE, e.getClass().getName())
              .put(SemanticAttributes.EXCEPTION_MESSAGE, e.getMessage())
              .put(SemanticAttributes.EXCEPTION_ESCAPED, rethrown).build());
    }
  }

  /**
   * Obtain {@link org.apache.accumulo.core.clientImpl.thrift.TInfo} for the current context. This
   * is used to send the current trace information to a remote process
   */
  public static TInfo traceInfo() {
    TInfo tinfo = new TInfo();
    // Serialize the current context into the TInfo headers using W3C trace-context format.
    W3CTraceContextPropagator.getInstance().inject(Context.current(), tinfo, TInfo::putToHeaders);
    return tinfo;
  }

  /**
   * Returns a newly created Context from the TInfo object sent by a remote process. The Context can
   * then be used in this process to continue the tracing. The Context is used like:
   *
   * <pre>
   * Context remoteCtx = getContext(tinfo);
   * Span span = tracer.spanBuilder(name).setParent(remoteCtx).startSpan()
   * </pre>
   *
   * @param tinfo tracing information serialized over Thrift
   */
  private static Context getContext(TInfo tinfo) {
    return W3CTraceContextPropagator.getInstance().extract(Context.current(), tinfo,
        new TextMapGetter<TInfo>() {
          @Override
          public Iterable<String> keys(TInfo carrier) {
            // Headers may be absent when the sender had no trace context.
            if (carrier.getHeaders() == null) {
              return null;
            }
            return carrier.getHeaders().keySet();
          }

          @Override
          public String get(TInfo carrier, String key) {
            if (carrier.getHeaders() == null) {
              return null;
            }
            return carrier.getHeaders().get(key);
          }
        });
  }

  /** Wraps a Runnable so the current context is restored when it runs; idempotent. */
  public static Runnable wrap(Runnable r) {
    return r instanceof TraceWrappedRunnable ? r : new TraceWrappedRunnable(r);
  }

  public static Runnable unwrap(Runnable r) {
    return TraceWrappedRunnable.unwrapFully(r);
  }

  /** Wraps a Callable so the current context is restored when it runs; idempotent. */
  public static <T> Callable<T> wrap(Callable<T> c) {
    return c instanceof TraceWrappedCallable ? c : new TraceWrappedCallable<>(c);
  }

  public static <T> Callable<T> unwrap(Callable<T> c) {
    return TraceWrappedCallable.unwrapFully(c);
  }

  /**
   * Returns a dynamic proxy over the given service implementation that starts a server-side RPC
   * span for every method whose first argument is a {@link TInfo}; all other methods are invoked
   * directly. InvocationTargetExceptions are unwrapped so callers see the original cause.
   */
  public static <T> T wrapService(final T instance) {
    InvocationHandler handler = (obj, method, args) -> {
      // Only trace methods that carry trace context as their first argument.
      if (args == null || args.length < 1 || args[0] == null || !(args[0] instanceof TInfo)) {
        try {
          return method.invoke(instance, args);
        } catch (InvocationTargetException e) {
          throw e.getCause();
        }
      }
      Span span = startServerRpcSpan(instance.getClass(), method.getName(), (TInfo) args[0]);
      try (Scope scope = span.makeCurrent()) {
        return method.invoke(instance, args);
      } catch (Exception e) {
        Throwable t = e instanceof InvocationTargetException ? e.getCause() : e;
        setException(span, t, true);
        throw t;
      } finally {
        span.end();
      }
    };
    return wrapRpc(handler, instance);
  }

  // Creates a proxy implementing all interfaces of the instance, dispatching through the handler.
  private static <T> T wrapRpc(final InvocationHandler handler, final T instance) {
    @SuppressWarnings("unchecked")
    T proxiedInstance = (T) Proxy.newProxyInstance(instance.getClass().getClassLoader(),
        instance.getClass().getInterfaces(), handler);
    return proxiedInstance;
  }
}
| 9,562 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/constraints/Violations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.constraints;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.data.ConstraintViolationSummary;
/**
* A class for accumulating constraint violations across a number of mutations.
*/
public class Violations {
private static class CVSKey {
final private String className;
final private short vcode;
CVSKey(ConstraintViolationSummary cvs) {
this.className = cvs.constrainClass;
this.vcode = cvs.violationCode;
}
@Override
public int hashCode() {
return className.hashCode() + vcode;
}
@Override
public boolean equals(Object o) {
if (o instanceof CVSKey) {
return equals((CVSKey) o);
}
return false;
}
public boolean equals(CVSKey ocvsk) {
return className.equals(ocvsk.className) && vcode == ocvsk.vcode;
}
}
public static final Violations EMPTY = new Violations(Collections.emptyMap());
private Map<CVSKey,ConstraintViolationSummary> cvsmap;
/**
* Creates a new empty object.
*/
public Violations() {
cvsmap = new HashMap<>();
}
private Violations(Map<CVSKey,ConstraintViolationSummary> cvsmap) {
this.cvsmap = cvsmap;
}
/**
* Checks if this object is empty, i.e., that no violations have been added.
*
* @return true if empty
*/
public boolean isEmpty() {
return cvsmap.isEmpty();
}
private void add(CVSKey cvsk, ConstraintViolationSummary cvs) {
ConstraintViolationSummary existingCvs = cvsmap.get(cvsk);
if (existingCvs == null) {
cvsmap.put(cvsk, cvs);
} else {
existingCvs.numberOfViolatingMutations += cvs.numberOfViolatingMutations;
}
}
/**
* Adds a violation. If a matching violation was already added, then its count is increased.
*
* @param cvs summary of violation
*/
public void add(ConstraintViolationSummary cvs) {
CVSKey cvsk = new CVSKey(cvs);
add(cvsk, cvs);
}
/**
* Adds all violations from the given object to this one.
*
* @param violations violations to add
*/
public void add(Violations violations) {
Set<Entry<CVSKey,ConstraintViolationSummary>> es = violations.cvsmap.entrySet();
for (Entry<CVSKey,ConstraintViolationSummary> entry : es) {
add(entry.getKey(), entry.getValue());
}
}
/**
* Adds a list of violations.
*
* @param cvsList list of violation summaries
*/
public void add(List<ConstraintViolationSummary> cvsList) {
for (ConstraintViolationSummary constraintViolationSummary : cvsList) {
add(constraintViolationSummary);
}
}
/**
* Gets the violations as a list of summaries.
*
* @return list of violation summaries
*/
public List<ConstraintViolationSummary> asList() {
return new ArrayList<>(cvsmap.values());
}
}
| 9,563 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/MapCounter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.HashMap;
import java.util.Set;
import java.util.stream.LongStream;
/**
 * A Map counter for counting with longs or integers. Keys whose count reaches exactly zero are
 * removed from the map. Not thread safe.
 *
 * @param <KT> key type
 */
public class MapCounter<KT> {

  // Mutable box so a stored count can be updated in place without re-hashing the key.
  static class MutableLong {
    long l = 0L;
  }

  private final HashMap<KT,MutableLong> map;

  public MapCounter() {
    map = new HashMap<>();
  }

  /**
   * Adds the given amount to the key's count and returns the new count. A key whose count becomes
   * exactly zero is removed from the map.
   */
  public long increment(KT key, long amount) {
    MutableLong counter = map.computeIfAbsent(key, unused -> new MutableLong());
    counter.l += amount;
    long updated = counter.l;
    if (updated == 0) {
      map.remove(key);
    }
    return updated;
  }

  /** Subtracts the given amount from the key's count and returns the new count. */
  public long decrement(KT key, long amount) {
    return increment(key, -amount);
  }

  /** Returns the key's count, or 0 for an absent key. */
  public long get(KT key) {
    MutableLong counter = map.get(key);
    return counter == null ? 0 : counter.l;
  }

  /**
   * Returns the key's count as an int.
   *
   * @throws ArithmeticException if the count does not fit in an int
   */
  public int getInt(KT key) {
    return Math.toIntExact(get(key));
  }

  public Set<KT> keySet() {
    return map.keySet();
  }

  public LongStream values() {
    return map.values().stream().mapToLong(counter -> counter.l);
  }

  /** Returns the largest count, or 0 when no keys are present. */
  public long max() {
    return values().max().orElse(0);
  }

  public int size() {
    return map.size();
  }
}
| 9,564 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/AddressUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.security.Security;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.net.HostAndPort;
/**
 * Utilities for parsing host/port address strings and for inspecting the JVM's negative DNS
 * cache configuration.
 */
public class AddressUtil {

  private static final Logger log = LoggerFactory.getLogger(AddressUtil.class);

  /**
   * Parses a host/port string into a {@link HostAndPort}. Any '+' characters are first replaced
   * with ':' (legacy Accumulo address separator) before parsing.
   *
   * @param address address string, e.g. "host:1234" or "host+1234"
   * @param ignoreMissingPort if false, an address with no port is rejected
   * @return the parsed host and port
   * @throws IllegalArgumentException if a port is required but missing
   */
  public static HostAndPort parseAddress(String address, boolean ignoreMissingPort)
      throws NumberFormatException {
    address = address.replace('+', ':');
    HostAndPort hap = HostAndPort.fromString(address);
    if (!ignoreMissingPort && !hap.hasPort()) {
      throw new IllegalArgumentException(
          "Address was expected to contain port. address=" + address);
    }

    return hap;
  }

  /**
   * Parses a host/port string, supplying {@code defaultPort} when the input has no port.
   *
   * @param address address string, possibly without a port
   * @param defaultPort port to use when none is present in the input
   * @return the parsed host and port
   */
  public static HostAndPort parseAddress(String address, int defaultPort) {
    return parseAddress(address, true).withDefaultPort(defaultPort);
  }

  /**
   * Fetch the security value that determines how long DNS failures are cached. Looks up the
   * security property 'networkaddress.cache.negative.ttl'. Should that fail returns the default
   * value used in the Oracle JVM 1.4+, which is 10 seconds.
   *
   * @param originalException the host lookup that is the source of needing this lookup. maybe be
   *        null.
   * @return positive integer number of seconds
   * @see InetAddress
   * @throws IllegalArgumentException if dns failures are cached forever
   */
  public static int getAddressCacheNegativeTtl(UnknownHostException originalException) {
    // default matches the Oracle JVM 1.4+ fallback of 10 seconds
    int negativeTtl = 10;
    try {
      negativeTtl = Integer.parseInt(Security.getProperty("networkaddress.cache.negative.ttl"));
    } catch (NumberFormatException exception) {
      log.warn("Failed to get JVM negative DNS response cache TTL due to format problem "
          + "(e.g. this JVM might not have the property). "
          + "Falling back to default based on Oracle JVM 1.4+ (10s)", exception);
    } catch (SecurityException exception) {
      log.warn("Failed to get JVM negative DNS response cache TTL due to security manager. "
          + "Falling back to default based on Oracle JVM 1.4+ (10s)", exception);
    }

    // -1 means "cache failures forever", which would make retrying a failed lookup pointless
    if (negativeTtl == -1) {
      log.error(
          "JVM negative DNS response cache TTL is set to 'forever' and host lookup failed. "
              + "TTL can be changed with security property "
              + "'networkaddress.cache.negative.ttl', see java.net.InetAddress.",
          originalException);
      throw new IllegalArgumentException(originalException);
    } else if (negativeTtl < 0) {
      log.warn("JVM specified negative DNS response cache TTL was negative (and not 'forever'). "
          + "Falling back to default based on Oracle JVM 1.4+ (10s)");
      negativeTtl = 10;
    }
    return negativeTtl;
  }
}
| 9,565 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Retry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Encapsulates the retrying implementation for some operation. Provides bounded retry attempts with
* a bounded, linear backoff.
*/
/**
 * Encapsulates the retrying implementation for some operation. Provides bounded retry attempts
 * with a bounded, linear (backOffFactor == 1) or exponential (backOffFactor &gt; 1) backoff.
 * Instances are built via {@link #builder()}. Not thread safe.
 */
public class Retry {
  private long maxRetries; // not final for testing
  private long waitIncrement; // not final for testing
  private long maxWait; // not final for testing
  private final long logIntervalNanoSec;
  private double backOffFactor;
  private long retriesDone;
  private long currentWait;
  private long initialWait;

  private boolean hasNeverLogged;
  private boolean hasLoggedWarn = false;
  private long lastRetryLog;
  private double currentBackOffFactor;
  private boolean doTimeJitter = true;

  /**
   * @param maxRetries Maximum times to retry, or a negative value for unlimited retries (see
   *        {@link #hasInfiniteRetries()})
   * @param startWait The amount of time (ms) to wait for the initial retry
   * @param maxWait The maximum wait (ms)
   * @param waitIncrement The amount of time (ms) to increment next wait time by
   * @param logInterval The amount of time (ms) between logging retries
   * @param backOffFactor multiplier applied to successive waits; 1 yields linear backoff
   */
  private Retry(long maxRetries, long startWait, long waitIncrement, long maxWait, long logInterval,
      double backOffFactor) {
    this.maxRetries = maxRetries;
    this.maxWait = maxWait;
    this.waitIncrement = waitIncrement;
    this.retriesDone = 0;
    this.currentWait = startWait;
    this.initialWait = startWait;
    this.logIntervalNanoSec = MILLISECONDS.toNanos(logInterval);
    this.hasNeverLogged = true;
    this.lastRetryLog = -1;
    this.backOffFactor = backOffFactor;
    this.currentBackOffFactor = this.backOffFactor;
  }

  @VisibleForTesting
  public void setBackOffFactor(double backOffFactor) {
    this.backOffFactor = backOffFactor;
    this.currentBackOffFactor = this.backOffFactor;
  }

  @VisibleForTesting
  public double getWaitFactor() {
    return backOffFactor;
  }

  @VisibleForTesting
  long getMaxRetries() {
    return maxRetries;
  }

  @VisibleForTesting
  long getCurrentWait() {
    return currentWait;
  }

  @VisibleForTesting
  long getWaitIncrement() {
    return waitIncrement;
  }

  @VisibleForTesting
  long getMaxWait() {
    return maxWait;
  }

  @VisibleForTesting
  void setMaxRetries(long maxRetries) {
    this.maxRetries = maxRetries;
  }

  @VisibleForTesting
  void setStartWait(long startWait) {
    this.currentWait = startWait;
    this.initialWait = startWait;
  }

  @VisibleForTesting
  void setWaitIncrement(long waitIncrement) {
    this.waitIncrement = waitIncrement;
  }

  @VisibleForTesting
  void setMaxWait(long maxWait) {
    this.maxWait = maxWait;
  }

  @VisibleForTesting
  void setDoTimeJitter(boolean jitter) {
    doTimeJitter = jitter;
  }

  /** Returns true when this instance was configured with unlimited retries. */
  public boolean hasInfiniteRetries() {
    return maxRetries < 0;
  }

  /** Returns the minimum interval between retry log messages, in milliseconds. */
  public long getLogInterval() {
    return NANOSECONDS.toMillis(logIntervalNanoSec);
  }

  /** Returns true if another retry attempt is permitted. */
  public boolean canRetry() {
    return hasInfiniteRetries() || (retriesDone < maxRetries);
  }

  /**
   * Consumes one retry attempt.
   *
   * @throws IllegalStateException if no retries remain; callers should check {@link #canRetry()}
   */
  public void useRetry() {
    if (!canRetry()) {
      throw new IllegalStateException("No retries left");
    }

    retriesDone++;
  }

  /** Returns true if at least one retry has been consumed. */
  public boolean hasRetried() {
    return retriesDone > 0;
  }

  /** Returns the number of retries consumed so far. */
  public long retriesCompleted() {
    return retriesDone;
  }

  /**
   * Sleeps for the current wait period, then grows the wait for the next attempt. With a
   * backOffFactor of 1 the wait grows linearly by waitIncrement; with a factor above 1 it grows
   * exponentially (with a small random jitter unless disabled for testing), capped at maxWait.
   *
   * @param log logger used to record the sleep at debug level
   * @param operationDescription description of the operation being retried, for logging
   * @throws InterruptedException if interrupted while sleeping
   */
  public void waitForNextAttempt(Logger log, String operationDescription)
      throws InterruptedException {

    // +/- 5% jitter keeps many retrying clients from synchronizing their attempts
    double waitFactor = (1 + (RANDOM.get().nextDouble() - 0.5) / 10.0) * currentBackOffFactor;
    if (!doTimeJitter) {
      waitFactor = currentBackOffFactor;
    }
    currentBackOffFactor = currentBackOffFactor * backOffFactor;

    log.debug("Sleeping for {}ms before retrying operation : {} ", currentWait,
        operationDescription);

    sleep(currentWait);

    if (backOffFactor == 1) {
      currentWait = Math.min(maxWait, currentWait + waitIncrement);
    } else if (backOffFactor > 1.0) {
      if (currentWait < maxWait) {
        waitIncrement = (long) Math.ceil(waitFactor * this.initialWait);
        currentWait = Math.min(maxWait, initialWait + waitIncrement);
      }
    }
  }

  // overridable for testing so tests need not actually sleep
  protected void sleep(long wait) throws InterruptedException {
    Thread.sleep(wait);
  }

  /**
   * Logs that a retry is occurring, including the throwable. Logs the first occurrence at debug,
   * then at most one warning per log interval; everything in between goes to trace.
   */
  public void logRetry(Logger log, String message, Throwable t) {
    // log the first time as debug, and then after every logInterval as a warning
    long now = System.nanoTime();
    if (hasNeverLogged) {
      if (log.isDebugEnabled()) {
        log.debug(getMessage(message, t));
      }
      hasNeverLogged = false;
      lastRetryLog = now;
    } else if ((now - lastRetryLog) > logIntervalNanoSec) {
      log.warn(getMessage(message), t);
      lastRetryLog = now;
      hasLoggedWarn = true;
    } else {
      if (log.isTraceEnabled()) {
        log.trace(getMessage(message, t));
      }
    }
  }

  /**
   * Logs that a retry is occurring. Logs the first occurrence at debug, then at most one warning
   * per log interval; everything in between goes to trace.
   */
  public void logRetry(Logger log, String message) {
    // log the first time as debug, and then after every logInterval as a warning
    long now = System.nanoTime();
    if (hasNeverLogged) {
      if (log.isDebugEnabled()) {
        log.debug(getMessage(message));
      }
      hasNeverLogged = false;
      lastRetryLog = now;
    } else if ((now - lastRetryLog) > logIntervalNanoSec) {
      log.warn(getMessage(message));
      lastRetryLog = now;
      hasLoggedWarn = true;
    } else {
      if (log.isTraceEnabled()) {
        log.trace(getMessage(message));
      }
    }
  }

  private String getMessage(String message) {
    return message + ", retrying attempt " + (retriesDone + 1) + " (suppressing retry messages for "
        + getLogInterval() + "ms)";
  }

  private String getMessage(String message, Throwable t) {
    return message + ":" + t + ", retrying attempt " + (retriesDone + 1)
        + " (suppressing retry messages for " + getLogInterval() + "ms)";
  }

  /**
   * Logs that the retried operation finally completed. Logged at info if a warning was ever
   * emitted for it, at debug otherwise; silent if no retry was ever logged.
   */
  public void logCompletion(Logger log, String operationDescription) {
    if (!hasNeverLogged) {
      var message = operationDescription + " completed after " + (retriesDone + 1)
          + " retries and is no longer retrying.";
      if (hasLoggedWarn) {
        log.info(message);
      } else {
        log.debug(message);
      }
    }
  }

  public interface NeedsRetries {
    /**
     * @return this builder with the maximum number of retries set to unlimited
     */
    NeedsRetryDelay infiniteRetries();

    /**
     * @param max the maximum number of retries to set
     * @return this builder with the maximum number of retries set to the provided value
     */
    NeedsRetryDelay maxRetries(long max);
  }

  public interface NeedsRetryDelay {
    /**
     * @param duration the amount of time to wait before the first retry; input is converted to
     *        milliseconds, rounded down to the nearest
     * @return this builder with the initial wait period set
     */
    NeedsTimeIncrement retryAfter(long duration, TimeUnit unit);
  }

  public interface NeedsTimeIncrement {
    /**
     * @param duration the amount of additional time to add before each subsequent retry; input is
     *        converted to milliseconds, rounded down to the nearest
     * @return this builder with the increment amount set
     */
    NeedsMaxWait incrementBy(long duration, TimeUnit unit);
  }

  public interface NeedsMaxWait {
    /**
     * @param duration the maximum amount of time to which the waiting period between retries can be
     *        incremented; input is converted to milliseconds, rounded down to the nearest
     * @return this builder with a maximum time limit set
     */
    NeedsBackOffFactor maxWait(long duration, TimeUnit unit);
  }

  public interface NeedsBackOffFactor {
    /**
     * @param backOffFactor the number that the wait increment will be successively multiplied by to
     *        make the time between retries to be exponentially increasing. The default value will
     *        be one.
     */
    NeedsLogInterval backOffFactor(double backOffFactor);
  }

  public interface NeedsLogInterval {
    /**
     * @param duration the minimum time interval between logging that a retry is occurring; input is
     *        converted to milliseconds, rounded down to the nearest
     * @return this builder with a logging interval set
     */
    BuilderDone logInterval(long duration, TimeUnit unit);
  }

  public interface BuilderDone {
    /**
     * Create a RetryFactory from this builder which can be used to create many Retry objects with
     * the same settings.
     *
     * @return this builder as a factory; intermediate references to this builder cannot be used to
     *         change options after this has been called
     */
    RetryFactory createFactory();

    /**
     * Create a single Retry object with the currently configured builder settings.
     *
     * @return a retry object from this builder's settings
     */
    Retry createRetry();
  }

  public interface RetryFactory {
    /**
     * Create a single Retry object from this factory's settings.
     *
     * @return a retry object from this factory's settings
     */
    Retry createRetry();
  }

  /** Entry point for the staged builder of {@link Retry} instances. */
  public static NeedsRetries builder() {
    return new RetryFactoryBuilder();
  }

  private static class RetryFactoryBuilder
      implements NeedsRetries, NeedsRetryDelay, NeedsTimeIncrement, NeedsMaxWait, NeedsLogInterval,
      NeedsBackOffFactor, BuilderDone, RetryFactory {

    private boolean modifiable = true;
    private long maxRetries;
    private long initialWait;
    private long maxWait;
    private long waitIncrement;
    private long logInterval;
    private double backOffFactor = 1.5;

    RetryFactoryBuilder() {}

    private void checkState() {
      Preconditions.checkState(modifiable,
          "Cannot modify this builder once 'createFactory()' has been called");
    }

    @Override
    public NeedsRetryDelay infiniteRetries() {
      checkState();
      this.maxRetries = -1;
      return this;
    }

    @Override
    public NeedsRetryDelay maxRetries(long max) {
      checkState();
      Preconditions.checkArgument(max >= 0, "Maximum number of retries must not be negative");
      this.maxRetries = max;
      return this;
    }

    @Override
    public NeedsTimeIncrement retryAfter(long duration, TimeUnit unit) {
      checkState();
      Preconditions.checkArgument(duration >= 0, "Initial waiting period must not be negative");
      this.initialWait = unit.toMillis(duration);
      return this;
    }

    @Override
    public NeedsMaxWait incrementBy(long duration, TimeUnit unit) {
      checkState();
      Preconditions.checkArgument(duration >= 0,
          "Amount of time to increment the wait between each retry must not be negative");
      this.waitIncrement = unit.toMillis(duration);
      return this;
    }

    @Override
    public NeedsLogInterval backOffFactor(double factor) {
      checkState();
      Preconditions.checkArgument(factor >= 1,
          "backOffFactor multiplier that increases the wait between each retry must be greater than or equal to one");
      this.backOffFactor = factor;
      return this;
    }

    @Override
    public NeedsBackOffFactor maxWait(long duration, TimeUnit unit) {
      checkState();
      this.maxWait = unit.toMillis(duration);
      Preconditions.checkArgument(maxWait >= initialWait,
          "Maximum wait between retries must not be less than the initial delay");
      return this;
    }

    @Override
    public BuilderDone logInterval(long duration, TimeUnit unit) {
      checkState();
      Preconditions.checkArgument(duration >= 0,
          "The amount of time between logging retries must not be negative");
      this.logInterval = unit.toMillis(duration);
      return this;
    }

    @Override
    public RetryFactory createFactory() {
      this.modifiable = false;
      return this;
    }

    @Override
    public Retry createRetry() {
      return new Retry(maxRetries, initialWait, waitIncrement, maxWait, logInterval, backOffFactor);
    }
  }
}
| 9,566 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ByteArrayComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.io.Serializable;
import java.util.Comparator;
/**
 * Compares byte arrays lexicographically, treating each byte as unsigned (0-255). When one array
 * is a prefix of the other, the shorter array sorts first.
 */
public class ByteArrayComparator implements Comparator<byte[]>, Serializable {
  private static final long serialVersionUID = 1L;

  @Override
  public int compare(byte[] left, byte[] right) {
    int common = Math.min(left.length, right.length);
    for (int i = 0; i < common; i++) {
      // mask to compare as unsigned values
      int diff = (left[i] & 0xff) - (right[i] & 0xff);
      if (diff != 0) {
        return diff;
      }
    }
    // shared prefix is equal; shorter array sorts first
    return left.length - right.length;
  }
}
| 9,567 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ByteBufferUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.ByteArrayInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.TableId;
import org.apache.hadoop.io.Text;
/**
 * Utilities for converting between {@link ByteBuffer}s and other byte-oriented types. All methods
 * leave the input buffer's position and limit unchanged.
 */
public class ByteBufferUtil {

  /**
   * Copies the remaining bytes of the buffer into a new array, or returns null for a null buffer.
   */
  public static byte[] toBytes(ByteBuffer buffer) {
    if (buffer == null) {
      return null;
    }
    if (buffer.hasArray()) {
      // did not use buffer.get() because it changes the position
      return Arrays.copyOfRange(buffer.array(), buffer.position() + buffer.arrayOffset(),
          buffer.limit() + buffer.arrayOffset());
    } else {
      byte[] data = new byte[buffer.remaining()];
      // duplicate in order to avoid changing position
      buffer.duplicate().get(data);
      return data;
    }
  }

  /**
   * Wraps each byte array in a ByteBuffer (no copying), or returns null for null input.
   */
  public static List<ByteBuffer> toByteBuffers(Collection<byte[]> bytesList) {
    if (bytesList == null) {
      return null;
    }
    ArrayList<ByteBuffer> result = new ArrayList<>();
    for (byte[] bytes : bytesList) {
      result.add(ByteBuffer.wrap(bytes));
    }
    return result;
  }

  /**
   * Copies each buffer's remaining bytes into a new byte array, or returns null for null input.
   */
  public static List<byte[]> toBytesList(Collection<ByteBuffer> bytesList) {
    if (bytesList == null) {
      return null;
    }
    ArrayList<byte[]> result = new ArrayList<>(bytesList.size());
    for (ByteBuffer bytes : bytesList) {
      result.add(toBytes(bytes));
    }
    return result;
  }

  /**
   * Decodes each buffer's remaining bytes as a UTF-8 string, collecting the distinct results, or
   * returns null for null input.
   */
  public static Set<String> toStringSet(Collection<ByteBuffer> bytesList) {
    if (bytesList == null) {
      return null;
    }
    Set<String> result = new HashSet<>(bytesList.size());
    for (ByteBuffer bytes : bytesList) {
      result.add(toString(bytes));
    }
    return result;
  }

  /**
   * Converts a buffer's remaining bytes to a Hadoop {@link Text}, or returns null for a null
   * buffer. Uses the backing array directly (no copy) when available.
   */
  public static Text toText(ByteBuffer byteBuffer) {
    if (byteBuffer == null) {
      return null;
    }
    if (byteBuffer.hasArray()) {
      Text result = new Text();
      result.set(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(),
          byteBuffer.remaining());
      return result;
    } else {
      return new Text(toBytes(byteBuffer));
    }
  }

  /**
   * Decodes a buffer's remaining bytes as a UTF-8 string. Unlike {@link #toText(ByteBuffer)},
   * throws NullPointerException for a null buffer.
   */
  public static String toString(ByteBuffer bytes) {
    if (bytes.hasArray()) {
      return new String(bytes.array(), bytes.arrayOffset() + bytes.position(), bytes.remaining(),
          UTF_8);
    } else {
      return new String(toBytes(bytes), UTF_8);
    }
  }

  /**
   * Decodes a buffer's remaining bytes as a UTF-8 string and interprets it as a {@link TableId}.
   */
  public static TableId toTableId(ByteBuffer bytes) {
    return TableId.of(toString(bytes));
  }

  /**
   * Wraps a {@link ByteSequence} in a ByteBuffer, sharing the backing array when one exists,
   * otherwise copying. Returns null for null input.
   */
  public static ByteBuffer toByteBuffers(ByteSequence bs) {
    if (bs == null) {
      return null;
    }

    if (bs.isBackedByArray()) {
      return ByteBuffer.wrap(bs.getBackingArray(), bs.offset(), bs.length());
    } else {
      return ByteBuffer.wrap(bs.toArray());
    }
  }

  /**
   * Writes the buffer's remaining bytes to {@code out}, without copying when the backing array is
   * accessible.
   */
  public static void write(DataOutput out, ByteBuffer buffer) throws IOException {
    if (buffer.hasArray()) {
      out.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
    } else {
      out.write(toBytes(buffer));
    }
  }

  /**
   * Creates an input stream over the buffer's remaining bytes, sharing the backing array when
   * possible to avoid a copy.
   */
  public static ByteArrayInputStream toByteArrayInputStream(ByteBuffer buffer) {
    if (buffer.hasArray()) {
      return new ByteArrayInputStream(buffer.array(), buffer.arrayOffset() + buffer.position(),
          buffer.remaining());
    } else {
      return new ByteArrayInputStream(toBytes(buffer));
    }
  }
}
| 9,568 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.WritableUtils;
/**
* A utility class for reading and writing bytes to byte buffers without synchronization.
*/
/**
 * A utility class for reading and writing bytes to byte buffers without synchronization.
 */
public class UnsynchronizedBuffer {
  // created this little class instead of using ByteArrayOutput stream and DataOutputStream
  // because both are synchronized... lots of small syncs slow things down
  /**
   * A byte buffer writer.
   */
  public static class Writer {

    int offset = 0;
    byte[] data;

    /**
     * Creates a new writer.
     */
    public Writer() {
      data = new byte[64];
    }

    /**
     * Creates a new writer.
     *
     * @param initialCapacity initial byte capacity
     */
    public Writer(int initialCapacity) {
      data = new byte[initialCapacity];
    }

    // grows the backing array (next power of two) so that l more bytes fit after offset
    private void reserve(int l) {
      if (offset + l > data.length) {
        int newSize = UnsynchronizedBuffer.nextArraySize(offset + l);

        byte[] newData = new byte[newSize];
        System.arraycopy(data, 0, newData, 0, offset);
        data = newData;
      }

    }

    /**
     * Adds bytes to this writer's buffer.
     *
     * @param bytes byte array
     * @param off offset into array to start copying bytes
     * @param length number of bytes to add
     * @throws IndexOutOfBoundsException if off or length are invalid
     */
    public void add(byte[] bytes, int off, int length) {
      reserve(length);
      System.arraycopy(bytes, off, data, offset, length);
      offset += length;
    }

    /**
     * Adds a Boolean value to this writer's buffer, encoded as a single byte (1 for true, 0 for
     * false).
     *
     * @param b Boolean value
     */
    public void add(boolean b) {
      reserve(1);
      if (b) {
        data[offset++] = 1;
      } else {
        data[offset++] = 0;
      }
    }

    /**
     * Gets (a copy of) the contents of this writer's buffer.
     *
     * @return byte buffer contents
     */
    public byte[] toArray() {
      byte[] ret = new byte[offset];
      System.arraycopy(data, 0, ret, 0, offset);
      return ret;
    }

    /**
     * Gets a <code>ByteBuffer</code> wrapped around this writer's buffer. The buffer shares this
     * writer's backing array; it is not a copy.
     *
     * @return byte buffer
     */
    public ByteBuffer toByteBuffer() {
      return ByteBuffer.wrap(data, 0, offset);
    }

    /**
     * Adds an integer value to this writer's buffer. The integer is encoded as a variable-length
     * list of bytes. See {@link #writeVLong(long)} for a description of the encoding.
     *
     * @param i integer value
     */
    public void writeVInt(int i) {
      writeVLong(i);
    }

    /**
     * Adds a long value to this writer's buffer. The long is encoded as a variable-length list of
     * bytes. For a description of the encoding scheme, see <code>WritableUtils.writeVLong()</code>
     * in the Hadoop API. [<a href=
     * "https://hadoop.apache.org/docs/stable/api/org/apache/hadoop/io/WritableUtils.html#writeVLong%28java.io.DataOutput,%20long%29">link</a>]
     *
     * @param i long value
     */
    public void writeVLong(long i) {
      // 9 bytes is the worst case for a vlong: 1 length byte plus 8 payload bytes
      reserve(9);
      offset = UnsynchronizedBuffer.writeVLong(data, offset, i);
    }

    /**
     * Returns the number of bytes written so far.
     */
    public int size() {
      return offset;
    }
  }

  /**
   * A byte buffer reader.
   */
  public static class Reader {
    int offset;
    byte[] data;

    /**
     * Creates a new reader.
     *
     * @param b bytes to read
     */
    public Reader(byte[] b) {
      this.data = b;
    }

    /**
     * Creates a new reader. Reads directly from the buffer's backing array (no copy) only when
     * the buffer's limit coincides with the end of that array; otherwise the remaining bytes are
     * copied. NOTE(review): presumably because this Reader does not track a limit, so reading
     * from a larger shared array could run past the buffer's logical end -- confirm.
     *
     * @param buffer byte buffer containing bytes to read
     */
    public Reader(ByteBuffer buffer) {
      if (buffer.hasArray() && buffer.array().length == buffer.arrayOffset() + buffer.limit()) {
        offset = buffer.arrayOffset() + buffer.position();
        data = buffer.array();
      } else {
        offset = 0;
        data = ByteBufferUtil.toBytes(buffer);
      }
    }

    /**
     * Reads an integer value from this reader's buffer (4 bytes, big-endian).
     *
     * @return integer value
     */
    public int readInt() {
      return (data[offset++] << 24) + ((data[offset++] & 255) << 16) + ((data[offset++] & 255) << 8)
          + ((data[offset++] & 255) << 0);
    }

    /**
     * Reads a long value from this reader's buffer (8 bytes, big-endian).
     *
     * @return long value
     */
    public long readLong() {
      return (((long) data[offset++] << 56) + ((long) (data[offset++] & 255) << 48)
          + ((long) (data[offset++] & 255) << 40) + ((long) (data[offset++] & 255) << 32)
          + ((long) (data[offset++] & 255) << 24) + ((data[offset++] & 255) << 16)
          + ((data[offset++] & 255) << 8) + ((data[offset++] & 255) << 0));
    }

    /**
     * Reads bytes from this reader's buffer, filling the given byte array.
     *
     * @param b byte array to fill
     */
    public void readBytes(byte[] b) {
      System.arraycopy(data, offset, b, 0, b.length);
      offset += b.length;
    }

    /**
     * Reads a Boolean value from this reader's buffer (single byte; 1 means true).
     *
     * @return Boolean value
     */
    public boolean readBoolean() {
      return (data[offset++] == 1);
    }

    /**
     * Reads an integer value from this reader's buffer, assuming the integer was encoded as a
     * variable-length list of bytes.
     *
     * @return integer value
     */
    public int readVInt() {
      return (int) readVLong();
    }

    /**
     * Reads a long value from this reader's buffer, assuming the long was encoded as a
     * variable-length list of bytes (Hadoop WritableUtils vlong format).
     *
     * @return long value
     */
    public long readVLong() {
      // the first byte either IS the value (single-byte case) or encodes payload length and sign
      byte firstByte = data[offset++];
      int len = WritableUtils.decodeVIntSize(firstByte);
      if (len == 1) {
        return firstByte;
      }
      // accumulate the remaining len-1 payload bytes, most significant first
      long i = 0;
      for (int idx = 0; idx < len - 1; idx++) {
        byte b = data[offset++];
        i = i << 8;
        i = i | (b & 0xFF);
      }
      // negative values were stored one's-complemented; undo that here
      return (WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
    }
  }

  /**
   * Determines what next array size should be by rounding up to next power of two.
   *
   * @param i current array size
   * @return next array size
   * @throws IllegalArgumentException if i is negative
   */
  public static int nextArraySize(int i) {
    if (i < 0) {
      throw new IllegalArgumentException();
    }

    if (i > (1 << 30)) {
      // this is the next power of 2 minus 8... a special case taken from ArrayList limits
      // because some JVMs can't allocate an array that large
      return Integer.MAX_VALUE - 8;
    }

    if (i == 0) {
      return 1;
    }

    // round up to next power of two
    int ret = i;
    ret--;
    ret |= ret >> 1;
    ret |= ret >> 2;
    ret |= ret >> 4;
    ret |= ret >> 8;
    ret |= ret >> 16;
    ret++;

    return ret;
  }

  /**
   * Use the provided byte[] to buffer only the bytes used to write out the integer i to the
   * DataOutput out. This will only ever make one write call to the DataOutput. Use this instead of
   * {@link WritableUtils#writeVInt(DataOutput, int)} which could make up to 4 separate writes to
   * the underlying OutputStream. Is compatible with WritableUtils as it will write the same data.
   */
  public static void writeVInt(DataOutput out, byte[] workBuffer, int i) throws IOException {
    int size = UnsynchronizedBuffer.writeVInt(workBuffer, 0, i);
    out.write(workBuffer, 0, size);
  }

  /**
   * Use the provided byte[] to buffer only the bytes used to write out the long i to the DataOutput
   * out. This will only ever make one write call to the DataOutput. Use this instead of
   * {@link WritableUtils#writeVLong(DataOutput, long)} which could make up to 8 separate writes to
   * the underlying OutputStream. Is compatible with WritableUtils as it will write the same data.
   */
  public static void writeVLong(DataOutput out, byte[] workBuffer, long i) throws IOException {
    int size = UnsynchronizedBuffer.writeVLong(workBuffer, 0, i);
    out.write(workBuffer, 0, size);
  }

  /**
   * Writes a variable int directly to a byte array. Is compatible with {@link WritableUtils} as it
   * will write the same data.
   */
  public static int writeVInt(byte[] dest, int offset, int i) {
    return writeVLong(dest, offset, i);
  }

  /**
   * Writes a variable long directly to a byte array. Is compatible with {@link WritableUtils} as it
   * will write the same data.
   *
   * @param dest The destination array for the long to be written to
   * @param offset The location where to write the long to
   * @param value The long value being written into byte array
   * @return Returns the new offset location
   */
  public static int writeVLong(byte[] dest, int offset, long value) {
    // values in [-112, 127] fit in a single byte
    if (value >= -112 && value <= 127) {
      dest[offset++] = (byte) value;
      return offset;
    }

    // otherwise the first byte encodes sign and payload length:
    // -113..-120 means positive with 1..8 payload bytes; -121..-128 means negative
    int len = -112;
    if (value < 0) {
      value ^= -1L; // take one's complement
      len = -120;
    }

    long tmp = value;
    while (tmp != 0) {
      tmp = tmp >> 8;
      len--;
    }

    dest[offset++] = (byte) len;

    len = (len < -120) ? -(len + 120) : -(len + 112);

    // emit payload bytes most significant first
    for (int idx = len; idx != 0; idx--) {
      int shiftbits = (idx - 1) * 8;
      long mask = 0xFFL << shiftbits;
      dest[offset++] = (byte) ((value & mask) >> shiftbits);
    }
    return offset;
  }
}
| 9,569 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Merge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES;
import static org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.accumulo.core.cli.ClientOpts;
import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
import org.apache.accumulo.core.trace.TraceUtil;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.IStringConverter;
import com.beust.jcommander.Parameter;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
/**
 * Command line utility that merges a table's small tablets together until each merged tablet
 * approaches a goal size (by default the table's split threshold).
 */
public class Merge {

  public static class MergeException extends Exception {
    private static final long serialVersionUID = 1L;

    MergeException(Exception ex) {
      super(ex);
    }
  }

  private static final Logger log = LoggerFactory.getLogger(Merge.class);

  // progress-reporting hook; protected so subclasses (e.g. tests) can override
  protected void message(String format, Object... args) {
    log.info(String.format(format, args));
  }

  // jcommander converter turning memory specs like "500M" into a byte count
  public static class MemoryConverter implements IStringConverter<Long> {
    @Override
    public Long convert(String value) {
      return ConfigurationTypeHelper.getFixedMemoryAsBytes(value);
    }
  }

  // jcommander converter wrapping a raw string in a hadoop Text
  static class TextConverter implements IStringConverter<Text> {
    @Override
    public Text convert(String value) {
      return new Text(value);
    }
  }

  static class Opts extends ClientOpts {
    @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
    String tableName;
    @Parameter(names = {"-s", "--size"}, description = "merge goal size",
        converter = MemoryConverter.class)
    Long goalSize = null;
    @Parameter(names = {"-f", "--force"},
        description = "merge small tablets even if merging them to larger"
            + " tablets might cause a split")
    boolean force = false;
    @Parameter(names = {"-b", "--begin"}, description = "start tablet",
        converter = TextConverter.class)
    Text begin = null;
    @Parameter(names = {"-e", "--end"}, description = "end tablet", converter = TextConverter.class)
    Text end = null;
  }

  /**
   * Parses command line options and runs the merge under a trace span.
   *
   * @param args command line arguments (see {@link Opts})
   * @throws MergeException wrapping any failure while connecting or merging
   */
  public void start(String[] args) throws MergeException {
    Opts opts = new Opts();
    opts.parseArgs(Merge.class.getName(), args);

    Span span = TraceUtil.startSpan(Merge.class, "start");
    try (Scope scope = span.makeCurrent()) {
      try (AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
        if (!client.tableOperations().exists(opts.tableName)) {
          System.err.println("table " + opts.tableName + " does not exist");
          return;
        }
        // default the goal size to the table's configured split threshold
        if (opts.goalSize == null || opts.goalSize < 1) {
          AccumuloConfiguration tableConfig =
              new ConfigurationCopy(client.tableOperations().getConfiguration(opts.tableName));
          opts.goalSize = tableConfig.getAsBytes(Property.TABLE_SPLIT_THRESHOLD);
        }
        message("Merging tablets in table %s to %d bytes", opts.tableName, opts.goalSize);
        mergomatic(client, opts.tableName, opts.begin, opts.end, opts.goalSize, opts.force);
      } catch (Exception ex) {
        TraceUtil.setException(span, ex, true);
        throw new MergeException(ex);
      } finally {
        span.end();
      }
    }
  }

  public static void main(String[] args) throws MergeException {
    Merge merge = new Merge();
    merge.start(args);
  }

  /** A tablet extent paired with the total size in bytes of its files. */
  public static class Size {
    public Size(KeyExtent extent, long size) {
      this.extent = extent;
      this.size = size;
    }

    KeyExtent extent;
    long size;
  }

  /**
   * Walks the table's tablets in order, accumulating candidates until their combined size exceeds
   * the goal, then merges a subset of them via {@link #mergeMany}; any leftover candidates carry
   * over into the next pass, and a final pass merges whatever remains.
   *
   * @param force merge a lone small tablet with a large neighbor even though the result may split
   * @throws MergeException wrapping any underlying failure
   */
  public void mergomatic(AccumuloClient client, String table, Text start, Text end, long goalSize,
      boolean force) throws MergeException {
    try {
      if (table.equals(MetadataTable.NAME)) {
        throw new IllegalArgumentException("cannot merge tablets on the metadata table");
      }
      List<Size> sizes = new ArrayList<>();
      long totalSize = 0;
      // Merge any until you get larger than the goal size, and then merge one less tablet
      Iterator<Size> sizeIterator = getSizeIterator(client, table, start, end);
      while (sizeIterator.hasNext()) {
        Size next = sizeIterator.next();
        totalSize += next.size;
        sizes.add(next);
        if (totalSize > goalSize) {
          // mergeMany mutates sizes in place; its return value is the combined size
          // of the tablets it left behind for the next pass
          totalSize = mergeMany(client, table, sizes, goalSize, force, false);
        }
      }
      if (sizes.size() > 1) {
        mergeMany(client, table, sizes, goalSize, force, true);
      }
    } catch (Exception ex) {
      throw new MergeException(ex);
    }
  }

  /**
   * Merges an initial run of small tablets from {@code sizes}, mutating the list in place. Tablets
   * at the front that are already at or above the goal size are dropped from consideration; then
   * the longest prefix whose cumulative size stays at or below the goal is merged. Note that if
   * ALL remaining tablets fit under the goal, {@code numToMerge} stays 0 and they are merged only
   * when {@code last} is set (the final pass).
   *
   * @return the combined size of the tablets left in {@code sizes} for the next pass
   */
  protected long mergeMany(AccumuloClient client, String table, List<Size> sizes, long goalSize,
      boolean force, boolean last) throws MergeException {
    // skip the big tablets, which will be the typical case
    while (!sizes.isEmpty()) {
      if (sizes.get(0).size < goalSize) {
        break;
      }
      sizes.remove(0);
    }
    if (sizes.isEmpty()) {
      return 0;
    }

    // collect any small ones
    long mergeSize = 0;
    int numToMerge = 0;
    for (int i = 0; i < sizes.size(); i++) {
      if (mergeSize + sizes.get(i).size > goalSize) {
        numToMerge = i;
        break;
      }
      mergeSize += sizes.get(i).size;
    }

    if (numToMerge > 1) {
      mergeSome(client, table, sizes, numToMerge);
    } else {
      if (numToMerge == 1 && sizes.size() > 1) {
        // here we have the case of a merge candidate that is surrounded by candidates that would
        // split
        if (force) {
          mergeSome(client, table, sizes, 2);
        } else {
          sizes.remove(0);
        }
      }
    }
    if (numToMerge == 0 && sizes.size() > 1 && last) {
      // That's the last tablet, and we have a bunch to merge
      mergeSome(client, table, sizes, sizes.size());
    }
    long result = 0;
    for (Size s : sizes) {
      result += s.size;
    }
    return result;
  }

  // merges the first numToMerge tablets and drops them from the candidate list
  protected void mergeSome(AccumuloClient client, String table, List<Size> sizes, int numToMerge)
      throws MergeException {
    merge(client, table, sizes, numToMerge);
    for (int i = 0; i < numToMerge; i++) {
      sizes.remove(0);
    }
  }

  /**
   * Issues the actual merge over the row range spanned by the first {@code numToMerge} tablets in
   * {@code sizes}.
   */
  protected void merge(AccumuloClient client, String table, List<Size> sizes, int numToMerge)
      throws MergeException {
    try {
      Text start = sizes.get(0).extent.prevEndRow();
      Text end = sizes.get(numToMerge - 1).extent.endRow();
      message("Merging %d tablets from (%s to %s]", numToMerge,
          start == null ? "-inf"
              : Key.toPrintableString(start.getBytes(), 0, start.getLength(), start.getLength()),
          end == null ? "+inf"
              : Key.toPrintableString(end.getBytes(), 0, end.getLength(), end.getLength()));
      client.tableOperations().merge(table, start, end);
    } catch (Exception ex) {
      throw new MergeException(ex);
    }
  }

  /**
   * Returns an iterator over the table's tablets (within the given row range, in tablet order),
   * each paired with the sum of its file sizes as read from the metadata table.
   *
   * NOTE(review): TabletsMetadata is likely AutoCloseable; it is not explicitly closed here —
   * confirm whether exhausting the stream releases the underlying scanner.
   */
  protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename, Text start,
      Text end) throws MergeException {
    // open up metadata, walk through the tablets.
    TableId tableId;
    TabletsMetadata tablets;
    try {
      ClientContext context = (ClientContext) client;
      tableId = context.getTableId(tablename);
      tablets = TabletsMetadata.builder(context).scanMetadataTable()
          .overRange(new KeyExtent(tableId, end, start).toMetaRange()).fetch(FILES, PREV_ROW)
          .build();
    } catch (Exception e) {
      throw new MergeException(e);
    }
    return tablets.stream().map(tm -> {
      long size = tm.getFilesMap().values().stream().mapToLong(DataFileValue::getSize).sum();
      return new Size(tm.getExtent(), size);
    }).iterator();
  }
}
| 9,570 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/LocalityGroupUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.stream.Collectors.toUnmodifiableSet;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.thrift.TMutation;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.rfile.RFile.Reader;
import org.apache.commons.lang3.mutable.MutableLong;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * Utilities for parsing, validating, encoding, and applying locality group configuration
 * (mappings from group name to sets of column families).
 */
public class LocalityGroupUtil {

  private static final Logger log = LoggerFactory.getLogger(LocalityGroupUtil.class);

  /**
   * Create a set of families to be passed into the SortedKeyValueIterator seek call from a supplied
   * set of columns. We are using the immutable set to enable faster comparisons down in the
   * LocalityGroupIterator.
   *
   * @param columns The set of columns
   * @return An immutable set of columns
   */
  public static Set<ByteSequence> families(Collection<Column> columns) {
    if (columns.isEmpty()) {
      return Set.of();
    }
    return columns.stream().map(c -> new ArrayByteSequence(c.getColumnFamily()))
        .collect(toUnmodifiableSet());
  }

  /** Thrown when locality group configuration is malformed or inconsistent. */
  public static class LocalityGroupConfigurationError extends AccumuloException {
    private static final long serialVersionUID = 855450342044719186L;

    LocalityGroupConfigurationError(String why) {
      super(why);
    }
  }

  // true for the table.groups.enabled property itself or any table.group.* property
  public static boolean isLocalityGroupProperty(String prop) {
    return prop.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey())
        || prop.equals(Property.TABLE_LOCALITY_GROUPS.getKey());
  }

  // validates by attempting a full parse; throws on any configuration error
  public static void checkLocalityGroups(Map<String,String> config)
      throws LocalityGroupConfigurationError {
    ConfigurationCopy cc = new ConfigurationCopy(config);
    if (cc.get(Property.TABLE_LOCALITY_GROUPS) != null) {
      getLocalityGroups(cc);
    }
  }

  // like getLocalityGroups, but logs and returns an empty map on bad configuration
  public static Map<String,Set<ByteSequence>>
      getLocalityGroupsIgnoringErrors(AccumuloConfiguration acuconf, TableId tableId) {
    try {
      return getLocalityGroups(acuconf);
    } catch (LocalityGroupConfigurationError | RuntimeException e) {
      log.warn("Failed to get locality group config for tableId:" + tableId
          + ", proceeding without locality groups.", e);
    }
    return Collections.emptyMap();
  }

  /**
   * Parses the configured locality groups into a map of group name to column family set. Each
   * group named in the groups property must have a corresponding {@code table.group.<name>}
   * entry, and no column family may appear in more than one group. The default (unnamed) group is
   * intentionally not included in the result.
   *
   * @throws LocalityGroupConfigurationError if a family is claimed by two groups, or a named
   *         group has no family list
   */
  public static Map<String,Set<ByteSequence>> getLocalityGroups(AccumuloConfiguration acuconf)
      throws LocalityGroupConfigurationError {
    Map<String,Set<ByteSequence>> result = new HashMap<>();
    String[] groups = acuconf.get(Property.TABLE_LOCALITY_GROUPS).split(",");
    for (String group : groups) {
      if (!group.isEmpty()) {
        result.put(group, new HashSet<>());
      }
    }
    HashSet<ByteSequence> all = new HashSet<>();
    for (Entry<String,String> entry : acuconf) {
      String property = entry.getKey();
      String value = entry.getValue();
      String prefix = Property.TABLE_LOCALITY_GROUP_PREFIX.getKey();
      if (property.startsWith(prefix)) {
        // this property configures a locality group, find out which one:
        String group = property.substring(prefix.length());
        String[] parts = group.split("\\.");
        group = parts[0];
        // only the bare group name (no sub-key after it) carries the family list
        if (result.containsKey(group) && (parts.length == 1)) {
          Set<ByteSequence> colFamsSet = decodeColumnFamilies(value);
          if (!Collections.disjoint(all, colFamsSet)) {
            colFamsSet.retainAll(all);
            throw new LocalityGroupConfigurationError("Column families " + colFamsSet + " in group "
                + group + " is already used by another locality group");
          }
          all.addAll(colFamsSet);
          result.put(group, colFamsSet);
        }
      }
    }
    Set<Entry<String,Set<ByteSequence>>> es = result.entrySet();
    for (Entry<String,Set<ByteSequence>> entry : es) {
      if (entry.getValue().isEmpty()) {
        throw new LocalityGroupConfigurationError(
            "Locality group " + entry.getKey() + " specified but not declared");
      }
    }
    // result.put("", all);
    return result;
  }

  // splits a comma separated list of encoded families and decodes each element
  public static Set<ByteSequence> decodeColumnFamilies(String colFams)
      throws LocalityGroupConfigurationError {
    HashSet<ByteSequence> colFamsSet = new HashSet<>();
    for (String family : colFams.split(",")) {
      ByteSequence cfbs = decodeColumnFamily(family);
      colFamsSet.add(cfbs);
    }
    return colFamsSet;
  }

  /**
   * Decodes a single column family produced by {@link #encodeColumnFamily(ByteSequence)}: literal
   * bytes, with {@code \\} for a backslash and {@code \xHH} for an arbitrary byte in hex.
   *
   * NOTE(review): a truncated or non-hex {@code \x} escape (e.g. "\x4") throws an unchecked
   * StringIndexOutOfBoundsException or NumberFormatException here rather than a
   * LocalityGroupConfigurationError — confirm whether callers rely on catching RuntimeException
   * (getLocalityGroupsIgnoringErrors does).
   */
  public static ByteSequence decodeColumnFamily(String colFam)
      throws LocalityGroupConfigurationError {
    byte[] output = new byte[colFam.length()];
    int pos = 0;

    for (int i = 0; i < colFam.length(); i++) {
      char c = colFam.charAt(i);

      if (c == '\\') {
        // next char must be 'x' or '\'
        i++;

        if (i >= colFam.length()) {
          throw new LocalityGroupConfigurationError("Expected 'x' or '\' after '\' in " + colFam);
        }

        char nc = colFam.charAt(i);

        switch (nc) {
          case '\\':
            output[pos++] = '\\';
            break;
          case 'x':
            // next two chars must be hex digits
            i++;
            output[pos++] = (byte) (0xff & Integer.parseInt(colFam.substring(i, i + 2), 16));
            i++;
            break;
          default:
            throw new LocalityGroupConfigurationError(
                "Expected 'x' or '\' after '\' in " + colFam);
        }
      } else {
        output[pos++] = (byte) (0xff & c);
      }
    }

    return new ArrayByteSequence(output, 0, pos);
  }

  // encodes a set of families as a sorted, comma separated list of escaped names
  public static String encodeColumnFamilies(Set<Text> colFams) {
    SortedSet<String> ecfs = new TreeSet<>();
    StringBuilder sb = new StringBuilder();
    for (Text text : colFams) {
      String ecf = encodeColumnFamily(sb, text.getBytes(), text.getLength());
      ecfs.add(ecf);
    }
    return Joiner.on(",").join(ecfs);
  }

  public static String encodeColumnFamily(ByteSequence bs) {
    if (bs.offset() != 0) {
      throw new IllegalArgumentException("The offset cannot be non-zero.");
    }
    return encodeColumnFamily(new StringBuilder(), bs.getBackingArray(), bs.length());
  }

  // escapes backslash, commas, and any byte outside printable ASCII as \xHH
  private static String encodeColumnFamily(StringBuilder sb, byte[] ba, int len) {
    sb.setLength(0);

    for (int i = 0; i < len; i++) {
      int c = 0xff & ba[i];
      if (c == '\\') {
        sb.append("\\\\");
      } else if (c >= 32 && c <= 126 && c != ',') {
        sb.append((char) c);
      } else {
        sb.append("\\x").append(String.format("%02X", c));
      }
    }

    return sb.toString();
  }

  /**
   * A mutation view restricted to the column updates belonging to one locality group. Only
   * {@link #getRow()} and {@link #getUpdates()} are supported; everything else fails fast.
   */
  public static class PartitionedMutation extends Mutation {
    private byte[] row;
    private List<ColumnUpdate> updates;

    public PartitionedMutation(byte[] row, List<ColumnUpdate> updates) {
      this.row = row;
      this.updates = updates;
    }

    @Override
    public byte[] getRow() {
      return row;
    }

    @Override
    public List<ColumnUpdate> getUpdates() {
      return updates;
    }

    @Override
    public TMutation toThrift() {
      throw new UnsupportedOperationException();
    }

    @Override
    public int hashCode() {
      throw new UnsupportedOperationException();
    }

    @Override
    @SuppressFBWarnings(value = "EQ_UNUSUAL",
        justification = "method expected to be unused or overridden")
    public boolean equals(Object o) {
      throw new UnsupportedOperationException();
    }

    @Override
    public boolean equals(Mutation m) {
      throw new UnsupportedOperationException();
    }
  }

  /**
   * Partitions mutations by locality group. Index {@code groups.length} in the output array is an
   * extra slot for updates whose family is not in any configured group.
   */
  public static class Partitioner {
    private Map<ByteSequence,Integer> colfamToLgidMap;
    private PreAllocatedArray<Map<ByteSequence,MutableLong>> groups;

    public Partitioner(PreAllocatedArray<Map<ByteSequence,MutableLong>> groups) {
      this.groups = groups;
      this.colfamToLgidMap = new HashMap<>();
      // invert group -> families into family -> group index for O(1) lookups
      for (int i = 0; i < groups.length; i++) {
        for (ByteSequence cf : groups.get(i).keySet()) {
          colfamToLgidMap.put(cf, i);
        }
      }
    }

    public void partition(List<Mutation> mutations,
        PreAllocatedArray<List<Mutation>> partitionedMutations) {

      MutableByteSequence mbs = new MutableByteSequence(new byte[0], 0, 0);

      PreAllocatedArray<List<ColumnUpdate>> parts = new PreAllocatedArray<>(groups.length + 1);

      for (Mutation mutation : mutations) {
        if (mutation.getUpdates().size() == 1) {
          // single-update mutation lands in exactly one group; no copy needed
          int lgid = getLgid(mbs, mutation.getUpdates().get(0));
          partitionedMutations.get(lgid).add(mutation);
        } else {
          for (int i = 0; i < parts.length; i++) {
            parts.set(i, null);
          }

          int lgcount = 0;

          for (ColumnUpdate cu : mutation.getUpdates()) {
            int lgid = getLgid(mbs, cu);
            if (parts.get(lgid) == null) {
              parts.set(lgid, new ArrayList<>());
              lgcount++;
            }
            parts.get(lgid).add(cu);
          }

          if (lgcount == 1) {
            // every update hit the same group; reuse the original mutation
            for (int i = 0; i < parts.length; i++) {
              if (parts.get(i) != null) {
                partitionedMutations.get(i).add(mutation);
                break;
              }
            }
          } else {
            // split the mutation, one PartitionedMutation per group touched
            for (int i = 0; i < parts.length; i++) {
              if (parts.get(i) != null) {
                partitionedMutations.get(i)
                    .add(new PartitionedMutation(mutation.getRow(), parts.get(i)));
              }
            }
          }
        }
      }
    }

    // resolves a column update's family to a group index; unknown families map to the
    // extra slot at index groups.length
    private Integer getLgid(MutableByteSequence mbs, ColumnUpdate cu) {
      mbs.setArray(cu.getColumnFamily(), 0, cu.getColumnFamily().length);
      Integer lgid = colfamToLgidMap.get(mbs);
      if (lgid == null) {
        lgid = groups.length;
      }
      return lgid;
    }
  }

  /**
   * This method created to help seek an rfile for a locality group obtained from
   * {@link Reader#getLocalityGroupCF()}. This method can possibly return an empty list for the
   * default locality group. When this happens the default locality group needs to be seeked
   * differently. This method helps do that.
   *
   * <p>
   * For the default locality group will seek using the families of all other locality groups
   * non-inclusive.
   *
   * @see Reader#getLocalityGroupCF()
   */
  public static void seek(FileSKVIterator reader, Range range, String lgName,
      Map<String,ArrayList<ByteSequence>> localityGroupCF) throws IOException {

    Collection<ByteSequence> families;
    boolean inclusive;

    if (lgName == null) {
      // this is the default locality group, create a set of all families not in the default group
      Set<ByteSequence> nonDefaultFamilies = new HashSet<>();

      localityGroupCF.forEach((k, v) -> {
        if (k != null) {
          nonDefaultFamilies.addAll(v);
        }
      });

      families = nonDefaultFamilies;
      inclusive = false;
    } else {
      families = localityGroupCF.get(lgName);
      inclusive = true;
    }

    reader.seek(range, families, inclusive);
  }

  // validates that groups are non-empty and pairwise disjoint; unlike getLocalityGroups this
  // throws IllegalArgumentException, for use at the API boundary
  public static void ensureNonOverlappingGroups(Map<String,Set<Text>> groups) {
    HashSet<Text> all = new HashSet<>();
    for (Entry<String,Set<Text>> entry : groups.entrySet()) {
      if (!Collections.disjoint(all, entry.getValue())) {
        throw new IllegalArgumentException(
            "Group " + entry.getKey() + " overlaps with another group");
      }
      if (entry.getValue().isEmpty()) {
        throw new IllegalArgumentException("Group " + entry.getKey() + " is empty");
      }
      all.addAll(entry.getValue());
    }
  }
}
| 9,571 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Help.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import org.apache.accumulo.start.Main;
import org.apache.accumulo.start.spi.KeywordExecutable;
import com.google.auto.service.AutoService;
/**
 * {@link KeywordExecutable} implementing the {@code accumulo help} command: prints the top-level
 * usage of the launcher. Discovered at runtime via {@code ServiceLoader} through the
 * {@code @AutoService} annotation.
 */
@AutoService(KeywordExecutable.class)
public class Help implements KeywordExecutable {
  @Override
  public String keyword() {
    return "help";
  }

  @Override
  public UsageGroup usageGroup() {
    return UsageGroup.CORE;
  }

  @Override
  public String description() {
    return "Prints usage";
  }

  // ignores its arguments; always prints the launcher usage
  @Override
  public void execute(final String[] args) {
    Main.printUsage();
  }
}
| 9,572 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/CompletableFutureUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
public class CompletableFutureUtil {

  /**
   * Combines a list of futures into a single future by repeatedly merging adjacent pairs with
   * {@code mergeFunc}, forming a balanced binary reduction tree. An empty list yields a completed
   * future holding {@code nothing.get()}. The input list is not modified.
   */
  public static <T> CompletableFuture<T> merge(List<CompletableFuture<T>> futures,
      BiFunction<T,T,T> mergeFunc, Supplier<T> nothing) {
    if (futures.isEmpty()) {
      return CompletableFuture.completedFuture(nothing.get());
    }
    List<CompletableFuture<T>> level = futures;
    // each pass halves the number of futures; an odd future is carried up unchanged
    while (level.size() > 1) {
      List<CompletableFuture<T>> next = new ArrayList<>(level.size() / 2);
      int i = 0;
      while (i < level.size()) {
        if (level.size() - i == 1) {
          next.add(level.get(i));
          i += 1;
        } else {
          next.add(level.get(i).thenCombine(level.get(i + 1), mergeFunc));
          i += 2;
        }
      }
      level = next;
    }
    return level.get(0);
  }

  /**
   * Iterate some function until a given condition is met.
   *
   * The step function should always return an asynchronous {@code
   * CompletableFuture} in order to avoid stack overflows.
   */
  public static <T> CompletableFuture<T> iterateUntil(Function<T,CompletableFuture<T>> step,
      Predicate<T> isDone, T init) {
    // an anonymous class (rather than a lambda) so the continuation can refer to itself
    Function<T,CompletableFuture<T>> loop = new Function<>() {
      @Override
      public CompletableFuture<T> apply(T current) {
        return isDone.test(current) ? CompletableFuture.completedFuture(current)
            : step.apply(current).thenCompose(this);
      }
    };
    return CompletableFuture.completedFuture(init).thenCompose(loop);
  }
}
| 9,573 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Pair.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Map.Entry;
import java.util.Objects;
/**
 * A simple immutable 2-tuple.
 *
 * @param <A> type of the first element
 * @param <B> type of the second element
 */
public class Pair<A,B> {

  private final A first;
  private final B second;

  public Pair(A f, B s) {
    first = f;
    second = s;
  }

  @Override
  public int hashCode() {
    // deliberately the sum of the element hashes, kept for compatibility
    return Objects.hashCode(first) + Objects.hashCode(second);
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof Pair<?,?>)) {
      return false;
    }
    Pair<?,?> that = (Pair<?,?>) o;
    return Objects.equals(first, that.first) && Objects.equals(second, that.second);
  }

  /** @return the first element */
  public A getFirst() {
    return first;
  }

  /** @return the second element */
  public B getSecond() {
    return second;
  }

  @Override
  public String toString() {
    return toString("(", ",", ")");
  }

  /** Renders the pair as {@code prefix + first + separator + second + suffix}. */
  public String toString(String prefix, String separator, String suffix) {
    return prefix + first + separator + second + suffix;
  }

  /** @return an immutable {@link Entry} view of this pair */
  public Entry<A,B> toMapEntry() {
    return new SimpleImmutableEntry<>(getFirst(), getSecond());
  }

  /** @return a new pair with the elements exchanged */
  public Pair<B,A> swap() {
    return new Pair<>(getSecond(), getFirst());
  }

  /** Builds a pair from a map entry, widening the element types as needed. */
  public static <K2,V2,K1 extends K2,V1 extends V2> Pair<K2,V2> fromEntry(Entry<K1,V1> entry) {
    return new Pair<>(entry.getKey(), entry.getValue());
  }
}
| 9,574 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/StopWatch.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.EnumMap;
/**
 * Accumulates elapsed wall-clock time (milliseconds) per enum-identified timer. A timer may be
 * started and stopped repeatedly; elapsed intervals are summed. All methods are synchronized, so
 * an instance may be shared across threads.
 *
 * @param <K> enum whose constants identify the individual timers
 */
public class StopWatch<K extends Enum<K>> {
  // start timestamp (ms) of each currently-running timer
  final EnumMap<K,Long> startTime;
  // accumulated elapsed time (ms) of each timer across completed start/stop cycles
  final EnumMap<K,Long> totalTime;

  public StopWatch(Class<K> k) {
    startTime = new EnumMap<>(k);
    totalTime = new EnumMap<>(k);
  }

  /**
   * Starts the given timer.
   *
   * @throws IllegalStateException if the timer is already running
   */
  public synchronized void start(K timer) {
    if (startTime.containsKey(timer)) {
      throw new IllegalStateException(timer + " already started");
    }
    startTime.put(timer, System.currentTimeMillis());
  }

  /**
   * Stops the given timer, adding the elapsed interval to its running total.
   *
   * @throws IllegalStateException if the timer is not running
   */
  public synchronized void stop(K timer) {
    // remove() both fetches the start time and marks the timer as stopped
    Long st = startTime.remove(timer);
    if (st == null) {
      throw new IllegalStateException(timer + " not started");
    }
    totalTime.merge(timer, System.currentTimeMillis() - st, Long::sum);
  }

  /**
   * @return total accumulated milliseconds for the timer; 0 if it was never stopped
   */
  public synchronized long get(K timer) {
    return totalTime.getOrDefault(timer, 0L);
  }

  /**
   * @return total accumulated time for the timer in seconds; 0.0 if it was never stopped
   */
  public synchronized double getSecs(K timer) {
    return totalTime.getOrDefault(timer, 0L) / 1000.0;
  }
}
| 9,575 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/OpTimer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import java.util.concurrent.TimeUnit;
/**
* Provides a stop watch for timing a single type of event. This code is based on the
* org.apache.hadoop.util.StopWatch available in hadoop 2.7.0
*/
/**
 * A stop watch for timing a single type of event, based on the
 * org.apache.hadoop.util.StopWatch available in hadoop 2.7.0. Elapsed time is tracked in
 * nanoseconds and accumulates across stop/start cycles until {@link #reset()}.
 */
public class OpTimer {

  private boolean isStarted;
  private long startNanos;
  private long currentElapsedNanos;

  /**
   * Returns timer running state
   *
   * @return true if timer is running
   */
  public boolean isRunning() {
    return isStarted;
  }

  /**
   * Start the timer instance.
   *
   * @return this instance for fluent chaining.
   * @throws IllegalStateException if start is called on running instance.
   */
  public OpTimer start() throws IllegalStateException {
    if (isStarted) {
      throw new IllegalStateException("OpTimer is already running");
    }
    startNanos = System.nanoTime();
    isStarted = true;
    return this;
  }

  /**
   * Stop the timer instance, folding the elapsed interval into the running total.
   *
   * @return this instance for fluent chaining.
   * @throws IllegalStateException if stop is called on instance that is not running.
   */
  public OpTimer stop() throws IllegalStateException {
    if (!isStarted) {
      throw new IllegalStateException("OpTimer is already stopped");
    }
    currentElapsedNanos += System.nanoTime() - startNanos;
    isStarted = false;
    return this;
  }

  /**
   * Stops the timer and clears the accumulated elapsed time back to 0.
   *
   * @return this instance for fluent chaining
   */
  public OpTimer reset() {
    isStarted = false;
    currentElapsedNanos = 0;
    return this;
  }

  /**
   * Converts the current timer value to the given unit; conversion to coarser granularities
   * truncates with loss of precision.
   *
   * @param timeUnit the time unit to convert to.
   * @return truncated time in the specified unit.
   */
  public long now(TimeUnit timeUnit) {
    return timeUnit.convert(now(), NANOSECONDS);
  }

  /**
   * Returns the current elapsed time scaled to the provided time unit as a double, without the
   * truncation of {@link #now(TimeUnit)}.
   *
   * @param timeUnit the time unit to scale the elapsed time to.
   * @return the elapsed time of this instance scaled to the provided time unit.
   */
  public double scale(TimeUnit timeUnit) {
    double nanosPerUnit = NANOSECONDS.convert(1L, timeUnit);
    return now() / nanosPerUnit;
  }

  /**
   * Returns current timer elapsed time as nanoseconds, including the in-progress interval when
   * the timer is running.
   *
   * @return elapsed time in nanoseconds.
   */
  public long now() {
    long elapsed = currentElapsedNanos;
    if (isStarted) {
      elapsed += System.nanoTime() - startNanos;
    }
    return elapsed;
  }

  /**
   * @return the current elapsed time in nanoseconds as a string.
   */
  @Override
  public String toString() {
    return Long.toString(now());
  }
}
| 9,576 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ConfigurationImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.accumulo.core.client.PluginEnvironment;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.conf.PropertyType;
import org.apache.accumulo.core.spi.common.ServiceEnvironment.Configuration;
/**
* The implementation class used for providing SPI configuration without exposing internal types.
*/
/**
 * The implementation class used for providing SPI configuration without exposing internal types.
 */
public class ConfigurationImpl implements Configuration {

  private final AccumuloConfiguration acfg;
  private final AccumuloConfiguration.Deriver<Map<String,String>> tableCustomDeriver;
  private final AccumuloConfiguration.Deriver<Map<String,String>> customDeriver;

  public ConfigurationImpl(AccumuloConfiguration acfg) {
    this.acfg = acfg;
    // derivers cache the stripped custom-property maps and recompute them when the config changes
    this.customDeriver =
        acfg.newDeriver(conf -> buildCustom(conf, Property.GENERAL_ARBITRARY_PROP_PREFIX));
    this.tableCustomDeriver =
        acfg.newDeriver(conf -> buildCustom(conf, Property.TABLE_ARBITRARY_PROP_PREFIX));
  }

  @Override
  public boolean isSet(String key) {
    Property knownProp = Property.getPropertyByKey(key);
    // unknown keys fall back to a direct string lookup
    return knownProp == null ? acfg.get(key) != null : acfg.isPropertySet(knownProp);
  }

  @Override
  public String get(String key) {
    // Resolve the Property first: it lets us hide sensitive values, and the enum lookup may be
    // more efficient than a string lookup.
    Property knownProp = Property.getPropertyByKey(key);
    if (knownProp == null) {
      return acfg.get(key);
    }
    return knownProp.isSensitive() ? null : acfg.get(knownProp);
  }

  @Override
  public Map<String,String> getWithPrefix(String prefix) {
    Property prefixProp = Property.getPropertyByKey(prefix);
    if (prefixProp == null || prefixProp.getType() != PropertyType.PREFIX) {
      // not a known prefix property: scan all entries for keys starting with the prefix
      return StreamSupport.stream(acfg.spliterator(), false)
          .filter(entry -> entry.getKey().startsWith(prefix))
          .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }
    return acfg.getAllPropertiesWithPrefix(prefixProp);
  }

  @Override
  public Map<String,String> getCustom() {
    return customDeriver.derive();
  }

  @Override
  public String getCustom(String keySuffix) {
    return getCustom().get(keySuffix);
  }

  @Override
  public Map<String,String> getTableCustom() {
    return tableCustomDeriver.derive();
  }

  @Override
  public String getTableCustom(String keySuffix) {
    return getTableCustom().get(keySuffix);
  }

  // Builds a map of all properties under the given prefix, keyed by the suffix that remains
  // after the prefix is stripped.
  private static Map<String,String> buildCustom(AccumuloConfiguration conf, Property customPrefix) {
    int prefixLen = customPrefix.getKey().length();
    return conf.getAllPropertiesWithPrefix(customPrefix).entrySet().stream().collect(
        Collectors.toUnmodifiableMap(e -> e.getKey().substring(prefixLen), Entry::getValue));
  }

  @Override
  public Iterator<Entry<String,String>> iterator() {
    // sensitive properties are never exposed through iteration
    return StreamSupport.stream(acfg.spliterator(), false)
        .filter(entry -> !Property.isSensitive(entry.getKey())).iterator();
  }

  @Override
  public <T> Supplier<T>
      getDerived(Function<PluginEnvironment.Configuration,T> computeDerivedValue) {
    Configuration self = this;
    AccumuloConfiguration.Deriver<T> deriver =
        acfg.newDeriver(entries -> computeDerivedValue.apply(self));
    return deriver::derive;
  }
}
| 9,577 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/NumUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.text.DecimalFormat;
public class NumUtil {

  private static final String[] QUANTITY_SUFFIX = {"", "K", "M", "B", "T", "e15", "e18", "e21"};
  private static final String[] SIZE_SUFFIX = {"", "K", "M", "G", "T", "P", "E", "Z"};

  // NOTE(review): DecimalFormat is not thread-safe; these shared instances could produce
  // corrupted output if formatted from multiple threads concurrently -- confirm callers.
  private static final DecimalFormat df = new DecimalFormat("#,###,##0");
  private static final DecimalFormat df_mantissa = new DecimalFormat("#,###,##0.00");

  /**
   * Formats a byte count as a human-readable string using base-1024 units (K, M, G, ...).
   */
  public static String bigNumberForSize(long big) {
    return bigNumber(big, SIZE_SUFFIX, 1024);
  }

  /**
   * Formats a count as a human-readable string using base-1000 units (K, M, B, ...).
   */
  public static String bigNumberForQuantity(long big) {
    return bigNumber(big, QUANTITY_SUFFIX, 1000);
  }

  /**
   * Formats a (possibly fractional) count using base-1000 units. Values at or beyond the largest
   * suffix are reported with that suffix rather than overrunning the suffix table.
   */
  public static String bigNumberForQuantity(double big) {
    return bigNumber(big, QUANTITY_SUFFIX, 1000);
  }

  private static String bigNumber(long big, String[] suffixes, long base) {
    if (big < base) {
      return df.format(big) + suffixes[0];
    }
    int exp = boundedExponent(big, suffixes.length, base);
    double val = big / Math.pow(base, exp);
    return df_mantissa.format(val) + suffixes[exp];
  }

  private static String bigNumber(double big, String[] suffixes, long base) {
    if (big < base) {
      return df_mantissa.format(big) + suffixes[0];
    }
    int exp = boundedExponent(big, suffixes.length, base);
    double val = big / Math.pow(base, exp);
    return df_mantissa.format(val) + suffixes[exp];
  }

  /**
   * Computes the order of magnitude of {@code big} in the given base, clamped to the available
   * suffix count. The clamp fixes an ArrayIndexOutOfBoundsException the double overload could hit
   * for values >= base^numSuffixes (e.g. 1e30 with the quantity suffixes).
   */
  private static int boundedExponent(double big, int numSuffixes, long base) {
    int exp = (int) (Math.log(big) / Math.log(base));
    return Math.min(exp, numSuffixes - 1);
  }
}
| 9,578 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Validator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.Objects.requireNonNull;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
/**
* A class that validates arguments of a specified generic type. Given a validation function that
* emits an error message if and only if the validation fails, this object's validate method will
* return the original valid argument, or throw an IllegalArgumentException with the custom error
* message if it fails to validate.
*/
/**
 * A class that validates arguments of a specified generic type. Given a validation function that
 * emits an error message if and only if the validation fails, this object's validate method will
 * return the original valid argument, or throw an IllegalArgumentException with the custom error
 * message if it fails to validate.
 */
public class Validator<T> {

  public static final Optional<String> OK = Optional.empty();

  private final Function<T,Optional<String>> validateFunction;

  // most recently accepted argument; volatile so the fast path is visible across threads
  private volatile T lastValidated = null;

  /**
   * Constructor to build a validator given the mapping function that validates. If the argument is
   * valid, the mapping function should return an empty Optional. Otherwise, it should return an
   * Optional containing the error message to be set in the IllegalArgumentException.
   *
   * @param validateFunction the function that validates or returns an error message
   */
  public Validator(final Function<T,Optional<String>> validateFunction) {
    this.validateFunction = requireNonNull(validateFunction);
  }

  /**
   * Validates the provided argument.
   *
   * @param argument argument to validate
   * @return the argument, if validation passes
   * @throws IllegalArgumentException if validation fails
   */
  public final T validate(final T argument) {
    // fast path: an argument equal to the one most recently validated is accepted immediately;
    // this helps when an API validates and then calls another API that validates the same value
    T recent = lastValidated;
    if (recent != null && Objects.equals(argument, recent)) {
      return argument;
    }
    Optional<String> failure = validateFunction.apply(argument);
    if (failure.isPresent()) {
      throw new IllegalArgumentException(failure.get());
    }
    // remember this argument so an immediate revalidation can be skipped
    lastValidated = argument;
    return argument;
  }

  /**
   * Creates a new validator that is the conjunction of this one and the given one. An argument
   * passed to the returned validator is valid if and only if it passes both validators. If the
   * other validator is null, the current validator is returned unchanged.
   *
   * @param other other validator
   * @return combined validator
   */
  public final Validator<T> and(final Validator<T> other) {
    return other == null ? this
        : new Validator<>(
            arg -> validateFunction.apply(arg).or(() -> other.validateFunction.apply(arg)));
  }

  /**
   * Creates a new validator that is the disjunction of this one and the given one. An argument
   * passed to the returned validator is valid if and only if it passes at least one of the
   * validators. If the other validator is null, the current validator is returned unchanged.
   *
   * @param other other validator
   * @return combined validator
   */
  public final Validator<T> or(final Validator<T> other) {
    return other == null ? this
        : new Validator<>(arg -> validateFunction.apply(arg).isEmpty() ? OK
            : other.validateFunction.apply(arg));
  }

  /**
   * Creates a new validator that is the negation of this one. An argument passed to the returned
   * validator is valid only if it fails this one.
   *
   * @return negated validator
   */
  public final Validator<T> not() {
    return new Validator<>(arg -> validateFunction.apply(arg).isPresent() ? OK
        : Optional.of("Validation should have failed with: Invalid argument " + arg));
  }
}
| 9,579 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Stat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import org.apache.accumulo.core.spi.common.Stats;
import org.apache.commons.math3.stat.descriptive.moment.Mean;
/**
 * Tracks running min, max, sum, and mean of a stream of long observations.
 */
public class Stat implements Stats {

  long min;
  long max;
  long sum;
  Mean mean;

  public Stat() {
    mean = new Mean();
    clear();
  }

  /** Folds one observation into the running min/max/sum/mean. */
  public void addStat(long stat) {
    if (stat < min) {
      min = stat;
    }
    if (stat > max) {
      max = stat;
    }
    sum += stat;
    mean.increment(stat);
  }

  @Override
  public long min() {
    // with no observations, report 0 rather than the Long.MAX_VALUE sentinel
    if (num() == 0) {
      return 0L;
    }
    return min;
  }

  @Override
  public long max() {
    // with no observations, report 0 rather than the Long.MIN_VALUE sentinel
    if (num() == 0) {
      return 0L;
    }
    return max;
  }

  @Override
  public long sum() {
    return sum;
  }

  @Override
  public double mean() {
    return mean.getResult();
  }

  @Override
  public String toString() {
    return String.format("%,d %,d %,.2f %,d", min(), max(), mean(), mean.getN());
  }

  /** Resets all statistics to their initial (empty) state. */
  public void clear() {
    min = Long.MAX_VALUE;
    max = Long.MIN_VALUE;
    sum = 0;
    mean.clear();
  }

  @Override
  public long num() {
    return mean.getN();
  }

  /** Returns an independent snapshot of the current statistics. */
  public Stat copy() {
    Stat duplicate = new Stat();
    duplicate.min = min;
    duplicate.max = max;
    duplicate.sum = sum;
    duplicate.mean = mean.copy();
    return duplicate;
  }
}
| 9,580 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/PeekingIterator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.Iterator;
/**
 * An iterator wrapper that allows inspecting the next element without consuming it.
 *
 * <p>
 * NOTE(review): a null element is indistinguishable from exhaustion here -- {@link #hasNext()}
 * reports false as soon as the buffered element is null, so sources must not contain nulls.
 */
public class PeekingIterator<E> implements Iterator<E> {

  boolean isInitialized;
  Iterator<E> source;
  E top;

  public PeekingIterator(Iterator<E> source) {
    // delegate so the buffering logic lives in exactly one place
    initialize(source);
  }

  /**
   * Creates an uninitialized instance. This should be used in conjunction with
   * {@link #initialize(Iterator)}.
   */
  public PeekingIterator() {
    isInitialized = false;
  }

  /**
   * Initializes this iterator, to be used with {@link #PeekingIterator()}.
   */
  public PeekingIterator<E> initialize(Iterator<E> source) {
    this.source = source;
    // eagerly buffer the first element so peek() can return it without advancing
    top = source.hasNext() ? source.next() : null;
    isInitialized = true;
    return this;
  }

  /**
   * Returns the next element without consuming it, or null when exhausted.
   *
   * @throws IllegalStateException if this iterator was never initialized
   */
  public E peek() {
    ensureInitialized();
    return top;
  }

  @Override
  public E next() {
    ensureInitialized();
    E lastPeeked = top;
    top = source.hasNext() ? source.next() : null;
    return lastPeeked;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean hasNext() {
    ensureInitialized();
    return top != null;
  }

  // single initialization guard shared by all accessors (was duplicated three times)
  private void ensureInitialized() {
    if (!isInitialized) {
      throw new IllegalStateException("Iterator has not yet been initialized");
    }
  }
}
| 9,581 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/CancelFlagFuture.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* A simple future wrapper that will set an atomic boolean to true if a future is successfully
* canceled
*/
/**
 * A simple future wrapper that will set an atomic boolean to true if a future is successfully
 * canceled
 */
public class CancelFlagFuture<T> implements Future<T> {

  // both references are fixed at construction; this wrapper itself is immutable
  private final Future<T> wrappedFuture;
  private final AtomicBoolean cancelFlag;

  public CancelFlagFuture(Future<T> wrappedFuture, AtomicBoolean cancelFlag) {
    this.wrappedFuture = wrappedFuture;
    this.cancelFlag = cancelFlag;
  }

  /**
   * Attempts to cancel the wrapped future, raising the flag only when cancellation succeeded.
   */
  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    boolean canceled = wrappedFuture.cancel(mayInterruptIfRunning);
    if (canceled) {
      cancelFlag.set(true);
    }
    return canceled;
  }

  @Override
  public boolean isCancelled() {
    return wrappedFuture.isCancelled();
  }

  @Override
  public boolean isDone() {
    return wrappedFuture.isDone();
  }

  @Override
  public T get() throws InterruptedException, ExecutionException {
    return wrappedFuture.get();
  }

  @Override
  public T get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    return wrappedFuture.get(timeout, unit);
  }
}
| 9,582 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Interner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.lang.ref.WeakReference;
import java.util.WeakHashMap;
/**
* A utility that mimics String.intern() for any immutable object type (including String).
*/
/**
 * A utility that mimics String.intern() for any immutable object type (including String).
 */
public class Interner<T> {

  // weak keys and weak value references: entries vanish once no strong references remain
  private final WeakHashMap<T,WeakReference<T>> internTable = new WeakHashMap<>();

  /**
   * Returns the canonical instance equal to {@code item}, registering {@code item} itself as the
   * canonical instance when no live equivalent is already present.
   */
  public synchronized T intern(T item) {
    WeakReference<T> existing = internTable.get(item);
    T canonical = (existing == null) ? null : existing.get();
    if (canonical != null) {
      return canonical;
    }
    internTable.put(item, new WeakReference<>(item));
    return item;
  }

  // for testing
  synchronized int size() {
    return internTable.size();
  }
}
| 9,583 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/PreAllocatedArray.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import com.google.common.collect.Iterators;
/**
* An {@link ArrayList} implementation that represents a type-safe pre-allocated array. This should
* be used exactly like an array, but helps avoid type-safety issues when mixing arrays with
* generics. The iterator is unmodifiable.
*/
/**
 * An {@link ArrayList} implementation that represents a type-safe pre-allocated array. This should
 * be used exactly like an array, but helps avoid type-safety issues when mixing arrays with
 * generics. The iterator is unmodifiable.
 */
public class PreAllocatedArray<T> implements Iterable<T> {

  private final ArrayList<T> internal;
  public final int length;

  /**
   * Creates an instance of the given capacity, with all elements initialized to null
   */
  public PreAllocatedArray(final int capacity) {
    length = capacity;
    internal = new ArrayList<>(capacity);
    // nCopies yields an immutable all-null list; addAll copies it into the backing ArrayList,
    // replacing the previous element-by-element fill loop
    internal.addAll(Collections.<T>nCopies(capacity, null));
  }

  /**
   * Set the element at the specified index, and return the old value.
   */
  public T set(final int index, final T element) {
    return internal.set(index, element);
  }

  /**
   * Get the item stored at the specified index.
   */
  public T get(final int index) {
    return internal.get(index);
  }

  @Override
  public Iterator<T> iterator() {
    // unmodifiable view: remove() throws, so the backing list cannot be mutated through the
    // iterator (replaces the Guava Iterators helper with the java.util equivalent)
    return Collections.unmodifiableList(internal).iterator();
  }
}
| 9,584 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/LazySingletons.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.security.SecureRandom;
import java.util.function.Supplier;
import com.google.common.base.Suppliers;
import com.google.gson.Gson;
/**
* This class provides easy access to global, immutable, lazily-instantiated, and thread-safe
* singleton resources. These should be used with static imports.
*/
/**
 * This class provides easy access to global, immutable, lazily-instantiated, and thread-safe
 * singleton resources. These should be used with static imports.
 */
public class LazySingletons {

  /**
   * A SecureRandom instance created with the default constructor.
   */
  public static final Supplier<SecureRandom> RANDOM = Suppliers.memoize(SecureRandom::new);

  /**
   * A Gson instance constructed with defaults. Construct your own if you need custom settings.
   */
  public static final Supplier<Gson> GSON = Suppliers.memoize(Gson::new);

  // utility holder; never instantiated
  private LazySingletons() {}
}
| 9,585 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ThriftMessageUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.Objects.requireNonNull;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.thrift.TBase;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import org.apache.thrift.transport.TMemoryInputTransport;
import org.apache.thrift.transport.TTransportException;
/**
* Serializes and deserializes Thrift messages to and from byte arrays. This class is not
* thread-safe, external synchronization is necessary if it is used concurrently.
*/
/**
 * Serializes and deserializes Thrift messages to and from byte arrays. This class is not
 * thread-safe, external synchronization is necessary if it is used concurrently.
 */
public class ThriftMessageUtil {

  private final int initialCapacity;
  private final TMemoryInputTransport inputTransport;
  private final TCompactProtocol inputProtocol;

  public ThriftMessageUtil() throws IOException {
    // NOTE: the output buffer size is fixed here; accepting the transport as an argument to
    // serialize() could make this configurable per call.
    this.initialCapacity = 64;
    try {
      this.inputTransport = new TMemoryInputTransport();
    } catch (TTransportException e) {
      throw new IOException(e);
    }
    this.inputProtocol = new TCompactProtocol(inputTransport);
  }

  /**
   * Convert the {@code msg} to a byte array representation
   *
   * @param msg The message to serialize
   * @return The serialized message
   * @throws IOException When serialization fails
   */
  public ByteBuffer serialize(TBase<?,?> msg) throws IOException {
    requireNonNull(msg);
    try {
      TMemoryBuffer outputBuffer = new TMemoryBuffer(initialCapacity);
      msg.write(new TCompactProtocol(outputBuffer));
      return ByteBuffer.wrap(outputBuffer.getArray(), 0, outputBuffer.length());
    } catch (TException e) {
      throw new IOException(e);
    }
  }

  /**
   * Assumes the entire contents of the byte array compose the serialized {@code instance}
   *
   * @see #deserialize(byte[], int, int, TBase)
   */
  public <T extends TBase<?,?>> T deserialize(byte[] serialized, T instance) throws IOException {
    return deserialize(serialized, 0, serialized.length, instance);
  }

  /**
   * Deserializes a message into the provided {@code instance} from {@code serialized}
   *
   * @param serialized The serialized representation of the object
   * @param instance An instance of the object to reconstitute
   * @return The reconstituted instance provided
   * @throws IOException When deserialization fails
   */
  public <T extends TBase<?,?>> T deserialize(byte[] serialized, int offset, int length, T instance)
      throws IOException {
    requireNonNull(instance);
    // point the reusable input transport at the given slice before reading
    inputTransport.reset(serialized, offset, length);
    try {
      instance.read(inputProtocol);
    } catch (TException e) {
      throw new IOException(e);
    }
    return instance;
  }
}
| 9,586 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/UtilWaitThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class UtilWaitThread {
  private static final Logger log = LoggerFactory.getLogger(UtilWaitThread.class);

  /**
   * Sleeps for the given number of milliseconds, logging (rather than propagating) an
   * interruption. The thread's interrupt status is restored so callers can still observe it.
   *
   * @param millis how long to sleep, in milliseconds
   */
  public static void sleep(long millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      // previously the interrupt was swallowed entirely; re-assert it so callers and
      // higher-level loops can react to the interruption
      Thread.currentThread().interrupt();
      log.error("{}", e.getMessage(), e);
    }
  }
}
| 9,587 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/HostAndPortComparator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.Comparator;
import com.google.common.net.HostAndPort;
public class HostAndPortComparator implements Comparator<HostAndPort> {

  /**
   * Orders by host name, then by port (0 when unset); nulls sort before any non-null value.
   */
  @Override
  public int compare(HostAndPort o1, HostAndPort o2) {
    if (o1 == null || o2 == null) {
      // nulls-first semantics: two nulls are equal, a lone null is smaller
      return o1 == o2 ? 0 : (o1 == null ? -1 : 1);
    }
    int byHost = o1.getHost().compareTo(o2.getHost());
    if (byHost != 0) {
      return byHost;
    }
    return Integer.compare(o1.getPortOrDefault(0), o2.getPortOrDefault(0));
  }
}
| 9,588 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ByteArraySet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.TreeSet;
public class ByteArraySet extends TreeSet<byte[]> {
private static final long serialVersionUID = 1L;
public ByteArraySet() {
super(new ByteArrayComparator());
}
public ByteArraySet(Collection<? extends byte[]> c) {
this();
addAll(c);
}
public static ByteArraySet fromStrings(Collection<String> c) {
List<byte[]> lst = new ArrayList<>();
for (String s : c) {
lst.add(s.getBytes(UTF_8));
}
return new ByteArraySet(lst);
}
public static ByteArraySet fromStrings(String... c) {
return ByteArraySet.fromStrings(Arrays.asList(c));
}
}
| 9,589 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
/**
 * A fully-qualified column: a column family plus a column qualifier.
 */
public class ColumnFQ implements Comparable<ColumnFQ> {

  // NOTE(review): Text is mutable and these are stored without defensive copies; callers must
  // not mutate the arguments after construction.
  private final Text colf;
  private final Text colq;

  /**
   * Creates a fully-qualified column from a family and qualifier.
   *
   * @throws IllegalArgumentException if either argument is null
   */
  public ColumnFQ(Text colf, Text colq) {
    if (colf == null || colq == null) {
      throw new IllegalArgumentException("column family and column qualifier must not be null");
    }
    this.colf = colf;
    this.colq = colq;
  }

  public ColumnFQ(Key k) {
    this(k.getColumnFamily(), k.getColumnQualifier());
  }

  public ColumnFQ(ColumnUpdate cu) {
    this(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier()));
  }

  public Text getColumnQualifier() {
    return colq;
  }

  public Text getColumnFamily() {
    return colf;
  }

  public Column toColumn() {
    return new Column(TextUtil.getBytes(colf), TextUtil.getBytes(colq), null);
  }

  /** Restricts the scanner to fetch exactly this column. */
  public void fetch(ScannerBase sb) {
    sb.fetchColumn(colf, colq);
  }

  /** Adds a put of this column with the given value to the mutation. */
  public void put(Mutation m, Value v) {
    m.put(colf, colq, v);
  }

  /** Adds a delete of this column to the mutation. */
  public void putDelete(Mutation m) {
    m.putDelete(colf, colq);
  }

  @Override
  public boolean equals(Object o) {
    // identity check first: cheaper, and previously it was unreachable for non-ColumnFQ args
    if (this == o) {
      return true;
    }
    if (!(o instanceof ColumnFQ)) {
      return false;
    }
    ColumnFQ ocfq = (ColumnFQ) o;
    return ocfq.colf.equals(colf) && ocfq.colq.equals(colq);
  }

  @Override
  public int hashCode() {
    // NOTE(review): summing the hashes makes a:b collide with b:a; left unchanged in case any
    // caller depends on current values -- consider 31 * colf.hashCode() + colq.hashCode().
    return colf.hashCode() + colq.hashCode();
  }

  /** Returns true if the key's family and qualifier both match this column. */
  public boolean hasColumns(Key key) {
    return key.compareColumnFamily(colf) == 0 && key.compareColumnQualifier(colq) == 0;
  }

  public boolean equals(Text colf, Text colq) {
    return this.colf.equals(colf) && this.colq.equals(colq);
  }

  @Override
  public int compareTo(ColumnFQ o) {
    int cmp = colf.compareTo(o.colf);
    if (cmp == 0) {
      cmp = colq.compareTo(o.colq);
    }
    return cmp;
  }

  @Override
  public String toString() {
    return colf + ":" + colq;
  }
}
| 9,590 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Validators.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.List;
import java.util.Optional;
import java.util.regex.Pattern;
import org.apache.accumulo.core.clientImpl.Namespace;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.util.tables.TableNameUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
/**
 * Shared input validators for namespace names, table names, and table ids. Each public constant
 * wraps a lambda in a project {@link Validator}; returning {@code Validator.OK} means the input is
 * accepted, while a non-empty {@link Optional} carries the rejection message.
 */
public class Validators {

  private static final Logger log = LoggerFactory.getLogger(Validators.class);

  // do not instantiate
  private Validators() {}

  // Maximum accepted length for a single name segment (a namespace or the table part of a name).
  private static final int MAX_SEGMENT_LEN = 1024;
  // Word characters only, bounded by the maximum segment length; applied to NEW names.
  private static final Pattern SEGMENT_PATTERN = Pattern.compile("\\w{1," + MAX_SEGMENT_LEN + "}");
  // before we added the length restriction; some existing tables may still be long
  private static final Pattern EXISTING_SEGMENT_PATTERN = Pattern.compile("\\w+");

  /**
   * Identifies which kind of name segment a message refers to; each helper builds a failure
   * message tagged with the segment's enum name ("Table" or "Namespace").
   */
  private enum NameSegment {
    Table, Namespace;

    Optional<String> isNull() {
      return Optional.of(name() + " name must not be null");
    }

    Optional<String> isBlank() {
      return Optional.of(name() + " name must not be blank");
    }

    Optional<String> tooLong(String s) {
      return Optional
          .of(name() + " name exceeds a maximum length of " + MAX_SEGMENT_LEN + ": " + s);
    }

    Optional<String> invalidChars(String s) {
      return Optional.of(name() + " name '" + s + "' contains invalid (non-word) characters.");
    }

    // Existing names that predate the length limit are only warned about, never rejected.
    void warnTooLong(String s) {
      log.warn(name() + " name exceeds a length of {};"
          + " Excessively long names are not supported and can result in unexpected behavior."
          + " Please rename {}", MAX_SEGMENT_LEN, s);
    }
  }

  // namespace name validators

  // common implementation for EXISTING_NAMESPACE_NAME and NEW_NAMESPACE_NAME
  private static Optional<String> _namespaceName(String ns, boolean existing) {
    if (ns == null) {
      return NameSegment.Namespace.isNull();
    }
    // special case for default namespace, which always exists
    if (ns.isEmpty()) {
      return Validator.OK;
    }
    if (ns.length() > MAX_SEGMENT_LEN) {
      if (existing) {
        // over-long existing namespaces are grandfathered in: warn and continue validating
        NameSegment.Namespace.warnTooLong(ns);
      } else {
        return NameSegment.Namespace.tooLong(ns);
      }
    }
    if ((existing ? EXISTING_SEGMENT_PATTERN : SEGMENT_PATTERN).matcher(ns).matches()) {
      return Validator.OK;
    }
    return NameSegment.Namespace.invalidChars(ns);
  }

  /** Accepts names of namespaces that may already exist (over-length names warn, not reject). */
  public static final Validator<String> EXISTING_NAMESPACE_NAME =
      new Validator<>(ns -> _namespaceName(ns, true));

  /** Accepts names for namespaces being created (length limit strictly enforced). */
  public static final Validator<String> NEW_NAMESPACE_NAME =
      new Validator<>(ns -> _namespaceName(ns, false));

  /** Rejects the built-in default (empty) and accumulo namespaces. */
  public static final Validator<String> NOT_BUILTIN_NAMESPACE = new Validator<>(ns -> {
    if (ns == null) {
      return NameSegment.Namespace.isNull();
    }
    if (Namespace.DEFAULT.name().equals(ns)) {
      return Optional.of("Namespace must not be the reserved empty namespace");
    }
    if (Namespace.ACCUMULO.name().equals(ns)) {
      return Optional.of("Namespace must not be the reserved namespace, " + ns);
    }
    return Validator.OK;
  });

  // table name validators

  // common implementation for EXISTING_TABLE_NAME and NEW_TABLE_NAME
  private static Optional<String> _tableName(String tableName, boolean existing) {
    if (tableName == null) {
      return NameSegment.Table.isNull();
    }
    int dotPosition = tableName.indexOf('.');
    if (dotPosition == 0) {
      // a leading dot means an empty namespace part, e.g. ".table"
      return Optional.of("Table name must include a namespace prior to a dot(.) character");
    }
    String tablePart = tableName;
    if (dotPosition > 0) {
      // qualified name: validate the namespace prefix, then validate the remainder as the table
      // part; the namespace must already exist, so the unbounded pattern is used for it
      String namespacePart = tableName.substring(0, dotPosition);
      if (!EXISTING_SEGMENT_PATTERN.matcher(namespacePart).matches()) {
        return NameSegment.Namespace.invalidChars(namespacePart);
      }
      tablePart = tableName.substring(dotPosition + 1);
    }
    if (tablePart.isBlank()) {
      return NameSegment.Table.isBlank();
    }
    if (tablePart.length() > MAX_SEGMENT_LEN) {
      if (existing) {
        // over-long existing table names are grandfathered in: warn and continue validating
        NameSegment.Table.warnTooLong(tablePart);
      } else {
        return NameSegment.Table.tooLong(tablePart);
      }
    }
    if (!(existing ? EXISTING_SEGMENT_PATTERN : SEGMENT_PATTERN).matcher(tablePart).matches()) {
      return NameSegment.Table.invalidChars(tablePart);
    }
    return Validator.OK;
  }

  /** Accepts (optionally namespace-qualified) names of tables that may already exist. */
  public static final Validator<String> EXISTING_TABLE_NAME =
      new Validator<>(tableName -> _tableName(tableName, true));

  /** Accepts (optionally namespace-qualified) names for tables being created. */
  public static final Validator<String> NEW_TABLE_NAME =
      new Validator<>(tableName -> _tableName(tableName, false));

  private static final List<String> metadataTables = List.of(RootTable.NAME, MetadataTable.NAME);

  /** Rejects the root and metadata table names. */
  public static final Validator<String> NOT_METADATA_TABLE = new Validator<>(t -> {
    if (t == null) {
      return NameSegment.Table.isNull();
    }
    if (metadataTables.contains(t)) {
      return Optional.of("Table must not be any of {" + Joiner.on(",").join(metadataTables) + "}");
    }
    return Validator.OK;
  });

  /** Rejects any table in the built-in accumulo namespace. */
  // NOTE(review): unlike NOT_METADATA_TABLE there is no null check here, and the behavior of
  // TableNameUtil.qualify(null) is not visible from this file -- confirm callers never pass null.
  public static final Validator<String> NOT_BUILTIN_TABLE = new Validator<>(t -> {
    if (Namespace.ACCUMULO.name().equals(TableNameUtil.qualify(t).getFirst())) {
      return Optional.of("Table must not be in the '" + Namespace.ACCUMULO.name() + "' namespace");
    }
    return Validator.OK;
  });

  /**
   * Returns a validator accepting a new table name only if it resolves to the same namespace as
   * {@code oldTableName}; renames must not move a table across namespaces.
   */
  public static Validator<String> sameNamespaceAs(String oldTableName) {
    final String oldNamespace = TableNameUtil.qualify(oldTableName).getFirst();
    return new Validator<>(newName -> {
      if (!oldNamespace.equals(TableNameUtil.qualify(newName).getFirst())) {
        return Optional
            .of("Unable to move tables to a new namespace by renaming. The namespace for " + newName
                + " does not match " + oldTableName);
      }
      return Validator.OK;
    });
  }

  // table id validators

  private static final Pattern VALID_ID_PATTERN = Pattern.compile("[a-z0-9]+"); // BigDecimal base36

  /** Accepts the reserved root/metadata ids, or any base-36 (lowercase alphanumeric) id. */
  public static final Validator<TableId> VALID_TABLE_ID = new Validator<>(id -> {
    if (id == null) {
      return Optional.of("Table id must not be null");
    }
    if (RootTable.ID.equals(id) || MetadataTable.ID.equals(id)
        || VALID_ID_PATTERN.matcher(id.canonical()).matches()) {
      return Validator.OK;
    }
    return Optional
        .of("Table IDs are base-36 numbers, represented with lowercase alphanumeric digits: " + id);
  });

  /** Rejects ids of tables that cannot be cloned (root and metadata). */
  public static final Validator<TableId> CAN_CLONE_TABLE = new Validator<>(id -> {
    if (id == null) {
      return Optional.of("Table id must not be null");
    }
    if (id.equals(MetadataTable.ID)) {
      return Optional.of("Cloning " + MetadataTable.NAME + " is dangerous and no longer supported,"
          + " see https://github.com/apache/accumulo/issues/1309.");
    }
    if (id.equals(RootTable.ID)) {
      return Optional.of("Unable to clone " + RootTable.NAME);
    }
    return Validator.OK;
  });

  /** Rejects the root table id. */
  public static final Validator<TableId> NOT_ROOT_TABLE_ID = new Validator<>(id -> {
    if (id == null) {
      return Optional.of("Table id must not be null");
    }
    if (RootTable.ID.equals(id)) {
      return Optional
          .of("Table must not be the " + RootTable.NAME + "(Id: " + RootTable.ID + ") table");
    }
    return Validator.OK;
  });
}
| 9,591 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ComparablePair.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
/**
 * A {@link Pair} whose elements are both comparable. Ordering is by the first element, with ties
 * broken by the second element.
 */
public final class ComparablePair<A extends Comparable<A>,B extends Comparable<B>> extends Pair<A,B>
    implements Comparable<ComparablePair<A,B>> {

  public ComparablePair(A f, B s) {
    super(f, s);
  }

  @Override
  public int compareTo(ComparablePair<A,B> abPair) {
    // primary ordering: first element; only consult the second on a tie
    int firstCmp = getFirst().compareTo(abPair.getFirst());
    if (firstCmp != 0) {
      return firstCmp;
    }
    return getSecond().compareTo(abPair.getSecond());
  }
}
| 9,592 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/BadArgumentException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.regex.PatternSyntaxException;
/**
 * Unchecked exception for reporting a malformed argument. Extends
 * {@link PatternSyntaxException} so the description, the offending argument text, and the index
 * of the error within it are all carried to the caller via the inherited accessors.
 */
public final class BadArgumentException extends PatternSyntaxException {
  private static final long serialVersionUID = 1L;

  /**
   * @param desc description of the problem
   * @param badarg the argument text that failed to parse
   * @param index the approximate index within {@code badarg} where the problem was detected
   */
  public BadArgumentException(String desc, String badarg, int index) {
    super(desc, badarg, index);
  }
}
| 9,593 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/FastFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.base.Preconditions;
/**
 * Fast, allocation-light zero-padded number formatting, used as a replacement for
 * {@code String.format} in hot paths.
 */
public class FastFormat {

  private static final byte[] EMPTY_BYTES = new byte[] {};

  /**
   * Formats {@code num} in the given radix, left-padded with '0' to at least {@code width} digits
   * and preceded by {@code prefix}.
   *
   * <p>
   * This is 7 to 8 times faster than {@code String.format("%s%06d", prefix, num)}.
   *
   * @param num non-negative value to format
   * @param width minimum number of digits (shorter values are zero padded)
   * @param radix radix passed to {@link Long#toString(long, int)}
   * @param prefix bytes emitted before the digits
   * @return a new byte array of length {@code prefix.length + max(width, digit count)}
   * @throws IllegalArgumentException if {@code num} is negative
   */
  public static byte[] toZeroPaddedString(long num, int width, int radix, byte[] prefix) {
    checkNonNegative(num);
    String strNum = Long.toString(num, radix);
    byte[] ret = new byte[Math.max(strNum.length(), width) + prefix.length];
    if (toZeroPaddedString(ret, 0, strNum, width, prefix) != ret.length) {
      throw new IllegalArgumentException(" Did not format to expected width " + num + " " + width
          + " " + radix + " " + new String(prefix, UTF_8));
    }
    return ret;
  }

  // Explicit check (replacing Guava's message-less Preconditions.checkArgument) so the
  // offending value appears in the exception message.
  private static void checkNonNegative(long num) {
    if (num < 0) {
      throw new IllegalArgumentException("num must be non-negative, but was " + num);
    }
  }

  /** Formats the value as 16 zero-padded hexadecimal digit bytes (no prefix). */
  public static byte[] toZeroPaddedHex(long hexadecimal) {
    return toZeroPaddedString(hexadecimal, 16, 16, EMPTY_BYTES);
  }

  /**
   * Writes the zero-padded representation of {@code num} into {@code output} starting at
   * {@code outputOffset}.
   *
   * @return the number of bytes written
   * @throws IllegalArgumentException if {@code num} is negative
   */
  public static int toZeroPaddedString(byte[] output, int outputOffset, long num, int width,
      int radix, byte[] prefix) {
    checkNonNegative(num);
    String strNum = Long.toString(num, radix);
    return toZeroPaddedString(output, outputOffset, strNum, width, prefix);
  }

  // Shared write path: emits prefix, then '0' padding up to width, then the digits;
  // returns the number of bytes written starting at outputOffset.
  private static int toZeroPaddedString(byte[] output, int outputOffset, String strNum, int width,
      byte[] prefix) {
    int index = outputOffset;
    for (byte b : prefix) {
      output[index++] = b;
    }
    int end = width - strNum.length() + index;
    while (index < end) {
      output[index++] = '0';
    }
    for (int i = 0; i < strNum.length(); i++) {
      output[index++] = (byte) strNum.charAt(i);
    }
    return index - outputOffset;
  }

  /**
   * Create a zero padded string from a hexadecimal number. This is a faster replacement for:
   * String.format("%s%016x%s", PREFIX, tid, SUFFIX);
   */
  public static String toHexString(String prefix, long hexadecimal, String suffix) {
    return prefix + new String(toZeroPaddedString(hexadecimal, 16, 16, EMPTY_BYTES), UTF_8)
        + suffix;
  }

  /**
   * Create a zero padded string from a hexadecimal number. This is a faster replacement for:
   * String.format("%016x", tid)
   */
  public static String toHexString(long hexadecimal) {
    return toHexString("", hexadecimal, "");
  }
}
| 9,594 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/MonitorUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.fate.zookeeper.ZooReader;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import com.google.common.annotations.VisibleForTesting;
/**
 * Helper for discovering the monitor's advertised HTTP address via ZooKeeper.
 */
public class MonitorUtil {

  /**
   * Looks up the monitor's advertised HTTP address for this instance.
   *
   * @return the address string, or null if no monitor has registered one
   */
  public static String getLocation(ClientContext context)
      throws KeeperException, InterruptedException {
    return getLocation(context.getZooReader(), context);
  }

  @VisibleForTesting
  static String getLocation(ZooReader zr, ClientContext context)
      throws KeeperException, InterruptedException {
    String monitorPath = context.getZooKeeperRoot() + Constants.ZMONITOR_HTTP_ADDR;
    try {
      byte[] data = zr.getData(monitorPath);
      if (data == null) {
        return null;
      }
      return new String(data, UTF_8);
    } catch (NoNodeException e) {
      // If there's no node advertising the monitor, there's no monitor.
      return null;
    }
  }
}
| 9,595 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Encoding.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.util.Base64;
/**
 * Helpers for encoding arbitrary bytes as filesystem/ZooKeeper-safe node names using URL-safe
 * base64 with the padding characters omitted.
 */
public class Encoding {

  /**
   * Encodes {@code data} as URL-safe base64 with trailing '=' padding removed.
   *
   * @return the encoded name; empty string for an empty input
   */
  public static String encodeAsBase64FileName(byte[] data) {
    // withoutPadding() never emits trailing '=' characters, replacing the manual
    // strip-the-padding loop previously used here.
    return Base64.getUrlEncoder().withoutPadding().encodeToString(data);
  }

  /**
   * Decodes a name produced by {@link #encodeAsBase64FileName(byte[])}; the URL decoder accepts
   * unpadded input.
   */
  public static byte[] decodeBase64FileName(String node) {
    return Base64.getUrlDecoder().decode(node);
  }
}
| 9,596 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/Halt.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import org.apache.accumulo.core.util.threads.Threads;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Terminates the JVM immediately via {@link Runtime#halt(int)}, optionally logging a fatal
 * message or running a last-gasp action first.
 */
public class Halt {
  private static final Logger log = LoggerFactory.getLogger(Halt.class);

  /** Logs the message at error level and halts the JVM with exit status 0. */
  public static void halt(final String msg) {
    // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
    // (lambda replaces the anonymous Runnable; this class already uses lambdas below)
    halt(0, () -> log.error("FATAL {}", msg));
  }

  /** Logs the message at error level and halts the JVM with the given exit status. */
  public static void halt(final String msg, int status) {
    halt(status, () -> log.error("FATAL {}", msg));
  }

  /**
   * Runs {@code runnable} (if non-null) and then halts the JVM with {@code status}. A watchdog
   * thread is started first so the process still dies even if the runnable hangs; the finally
   * block guarantees a halt (status -1) even if the runnable throws.
   */
  public static void halt(final int status, Runnable runnable) {
    try {
      // give ourselves a little time to try and do something
      Threads.createThread("Halt Thread", () -> {
        sleepUninterruptibly(100, MILLISECONDS);
        Runtime.getRuntime().halt(status);
      }).start();
      if (runnable != null) {
        runnable.run();
      }
      Runtime.getRuntime().halt(status);
    } finally {
      // In case something else decides to throw a Runtime exception
      Runtime.getRuntime().halt(-1);
    }
  }
}
| 9,597 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/ShutdownUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import java.io.IOException;
/**
 * Helpers for detecting whether a failure happened during JVM shutdown (e.g. the hadoop shutdown
 * hook closing filesystem objects underneath us).
 */
public class ShutdownUtil {

  // utility class; do not instantiate (consistent with the other util classes in this package)
  private ShutdownUtil() {}

  /**
   * Determine if a JVM shutdown is in progress.
   *
   */
  public static boolean isShutdownInProgress() {
    try {
      // removeShutdownHook throws IllegalStateException once shutdown has begun; removing a
      // hook that was never added is otherwise a harmless no-op
      Runtime.getRuntime().removeShutdownHook(new Thread(() -> {}));
    } catch (IllegalStateException ise) {
      return true;
    }
    return false;
  }

  /**
   * Recursively checks whether {@code e}, any of its suppressed throwables, or anything in its
   * cause chain is an {@link IOException}.
   *
   * @return true if an IOException is found anywhere in the throwable graph; false for null
   */
  public static boolean isIOException(Throwable e) {
    if (e == null) {
      return false;
    }
    if (e instanceof IOException) {
      return true;
    }
    for (Throwable suppressed : e.getSuppressed()) {
      if (isIOException(suppressed)) {
        return true;
      }
    }
    return isIOException(e.getCause());
  }

  /**
   * @return true if there is a possibility that the exception was caused by the hadoop shutdown
   *         hook closing the hadoop file system objects, otherwise false
   */
  public static boolean wasCausedByHadoopShutdown(Exception e) {
    return isShutdownInProgress() && isIOException(e);
  }
}
| 9,598 |
0 | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/main/java/org/apache/accumulo/core/util/MutableByteSequence.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
/**
 * An {@link ArrayByteSequence} whose backing array, offset, and length may be swapped out after
 * construction, allowing one instance to be re-pointed at different byte ranges.
 */
public class MutableByteSequence extends ArrayByteSequence {
  private static final long serialVersionUID = 1L;

  public MutableByteSequence(byte[] data, int offset, int length) {
    super(data, offset, length);
  }

  /** Copies the contents of {@code bs} into a freshly allocated backing array. */
  public MutableByteSequence(ByteSequence bs) {
    // over-allocate (minimum 64 bytes) so the buffer can absorb slightly larger sequences later
    super(new byte[Math.max(64, bs.length())]);
    this.offset = 0;
    this.length = bs.length();
    System.arraycopy(bs.getBackingArray(), bs.offset(), data, 0, this.length);
  }

  /** Re-points this sequence at a different backing array and range. */
  public void setArray(byte[] data, int offset, int len) {
    this.data = data;
    this.offset = offset;
    this.length = len;
  }

  /** Adjusts the logical length without touching the backing array. */
  public void setLength(int len) {
    this.length = len;
  }
}
| 9,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.