index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/ValidatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Optional;
import java.util.regex.Pattern;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@link Validator}: direct success/failure validation plus the
 * {@code and}, {@code or}, and {@code not} combinators.
 */
public class ValidatorTest {

  private static final Pattern STARTSWITH_C = Pattern.compile("c.*");

  // accepts only the literal string "correct"; uses the default failure message
  private Validator<String> onlyCorrect;
  // accepts only "righto"; uses a custom failure message
  private Validator<String> onlyRighto;
  // accepts any non-null string starting with a lowercase 'c'
  private Validator<String> startsWithC;

  @BeforeEach
  public void setUp() {
    onlyCorrect = new Validator<>(
        arg -> "correct".equals(arg) ? Validator.OK : Optional.of("Invalid argument " + arg));
    onlyRighto = new Validator<>(arg -> "righto".equals(arg) ? Validator.OK
        : Optional.of("Not a correct argument : " + arg + " : done"));
    startsWithC = new Validator<>(s -> s != null && STARTSWITH_C.matcher(s).matches()
        ? Validator.OK : Optional.of("Invalid argument " + s));
  }

  @Test
  public void testValidate_Success() {
    // a valid argument is returned unchanged by validate()
    assertEquals("correct", onlyCorrect.validate("correct"));
  }

  @Test
  public void testValidate_Failure() {
    // check default message maker
    var defaultMsg =
        assertThrows(IllegalArgumentException.class, () -> onlyCorrect.validate("incorrect"));
    assertEquals("Invalid argument incorrect", defaultMsg.getMessage());
    // check custom message maker
    var customMsg =
        assertThrows(IllegalArgumentException.class, () -> onlyRighto.validate("somethingwrong"));
    assertEquals("Not a correct argument : somethingwrong : done", customMsg.getMessage());
  }

  @Test
  public void testAnd() {
    // both conditions must hold: starts with 'c' AND equals "correct"
    Validator<String> both = startsWithC.and(onlyCorrect);
    assertEquals("correct", both.validate("correct"));
    assertThrows(IllegalArgumentException.class, () -> both.validate("righto"));
    assertThrows(IllegalArgumentException.class, () -> both.validate("coriander"));
  }

  @Test
  public void testOr() {
    // either condition may hold: equals "correct" OR equals "righto"
    Validator<String> either = onlyCorrect.or(onlyRighto);
    assertEquals("correct", either.validate("correct"));
    assertEquals("righto", either.validate("righto"));
    assertThrows(IllegalArgumentException.class, () -> either.validate("coriander"));
  }

  @Test
  public void testNot() {
    // negation: strings starting with 'c' must now fail, everything else must pass
    Validator<String> negated = startsWithC.not();
    var e = assertThrows(IllegalArgumentException.class, () -> negated.validate("correct"));
    assertEquals("Validation should have failed with: Invalid argument correct", e.getMessage());
    e = assertThrows(IllegalArgumentException.class, () -> negated.validate("coriander"));
    assertEquals("Validation should have failed with: Invalid argument coriander", e.getMessage());
    assertEquals("righto", negated.validate("righto"));
    assertEquals("anythingNotStartingWithLowercaseC",
        negated.validate("anythingNotStartingWithLowercaseC"));
  }
}
| 9,300 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/RetryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.concurrent.TimeUnit.DAYS;
import static java.util.concurrent.TimeUnit.HOURS;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.util.Retry.NeedsLogInterval;
import org.apache.accumulo.core.util.Retry.NeedsMaxWait;
import org.apache.accumulo.core.util.Retry.NeedsRetries;
import org.apache.accumulo.core.util.Retry.NeedsRetryDelay;
import org.apache.accumulo.core.util.Retry.NeedsTimeIncrement;
import org.apache.accumulo.core.util.Retry.RetryFactory;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tests for {@link Retry}: retry accounting, wait/backoff timing (verified against a mocked
 * {@code sleep} method), builder argument validation, and periodic retry logging.
 */
public class RetryTest {
private Retry retry;
private static final long INITIAL_WAIT = 1000;
private static final long WAIT_INC = 1000;
private static final double BACKOFF_FACTOR = 1.0;
private static final long MAX_RETRIES = 5;
private static final long LOG_INTERVAL = 1000;
// built with infiniteRetries(); exercised by the unlimited/logging tests below
private Retry unlimitedRetry;
private static final TimeUnit MS = MILLISECONDS;
private static final Logger log = LoggerFactory.getLogger(RetryTest.class);
@BeforeEach
public void setup() {
// bounded retry: MAX_RETRIES attempts with a linear wait increment (backoff factor 1.0)
retry = Retry.builder().maxRetries(MAX_RETRIES).retryAfter(INITIAL_WAIT, MS)
.incrementBy(WAIT_INC, MS).maxWait(MAX_RETRIES * WAIT_INC, MS).backOffFactor(BACKOFF_FACTOR)
.logInterval(LOG_INTERVAL, MS).createRetry();
unlimitedRetry = Retry.builder().infiniteRetries().retryAfter(INITIAL_WAIT, MS)
.incrementBy(WAIT_INC, MS).maxWait(MAX_RETRIES * WAIT_INC, MS).backOffFactor(BACKOFF_FACTOR)
.logInterval(LOG_INTERVAL, MS).createRetry();
}
// canRetry() must be a pure query: calling it repeatedly never consumes a retry
@Test
public void canRetryDoesntAlterState() {
for (int i = 0; i < MAX_RETRIES + 1; i++) {
assertTrue(retry.canRetry());
}
}
// hasRetried() flips from false to true after the first useRetry()
@Test
public void hasRetriedAfterUse() {
assertFalse(retry.hasRetried());
retry.useRetry();
assertTrue(retry.hasRetried());
}
@Test
public void retriesAreCompleted() {
for (int i = 0; i < MAX_RETRIES; i++) {
assertEquals(i, retry.retriesCompleted());
// canRetry doesn't alter retry's state
assertTrue(retry.canRetry());
assertEquals(i, retry.retriesCompleted());
// Using the retry will increase the internal count
retry.useRetry();
assertEquals(i + 1, retry.retriesCompleted());
}
}
// once all retries are used, useRetry() must fail fast rather than silently over-retry
@Test
public void usingNonExistentRetryFails() {
for (int i = 0; i < MAX_RETRIES; i++) {
assertTrue(retry.canRetry());
retry.useRetry();
}
assertFalse(retry.canRetry());
assertThrows(IllegalStateException.class, () -> retry.useRetry(),
"Calling useRetry when canRetry returns false throws an exception");
}
// verifies linear wait growth by mocking sleep(); the strict mock enforces that each
// attempt sleeps exactly INITIAL_WAIT, INITIAL_WAIT + WAIT_INC, ... in order
@Test
public void testWaitIncrement() throws InterruptedException {
retry = EasyMock.createMockBuilder(Retry.class).addMockedMethod("sleep").createStrictMock();
retry.setMaxRetries(MAX_RETRIES);
retry.setStartWait(INITIAL_WAIT);
retry.setWaitIncrement(WAIT_INC);
retry.setMaxWait(MAX_RETRIES * 1000);
retry.setBackOffFactor(1);
// jitter is disabled so the expected sleep values are exact
retry.setDoTimeJitter(false);
long currentWait = INITIAL_WAIT;
for (int i = 1; i <= MAX_RETRIES; i++) {
retry.sleep(currentWait);
EasyMock.expectLastCall();
currentWait += WAIT_INC;
}
EasyMock.replay(retry);
while (retry.canRetry()) {
retry.useRetry();
retry.waitForNextAttempt(log, "test wait increment");
}
EasyMock.verify(retry);
}
// verifies exponential wait growth: the loop mirrors the expected computation
// wait = min(maxWait, INITIAL_WAIT + ceil(backOffFactor^i * WAIT_INC)) for each attempt
@Test
public void testBackOffFactor() throws InterruptedException {
retry = EasyMock.createMockBuilder(Retry.class).addMockedMethod("sleep").createStrictMock();
retry.setMaxRetries(MAX_RETRIES);
retry.setBackOffFactor(1.5);
retry.setStartWait(INITIAL_WAIT);
long waitIncrement = 0, currentWait = INITIAL_WAIT;
retry.setWaitIncrement(WAIT_INC);
// max wait is set high enough that the cap is never hit in this test
retry.setMaxWait(MAX_RETRIES * 128000);
retry.setDoTimeJitter(false);
double backOfFactor = 1.5, originalBackoff = 1.5;
for (int i = 1; i <= MAX_RETRIES; i++) {
retry.sleep(currentWait);
double waitFactor = backOfFactor;
backOfFactor *= originalBackoff;
waitIncrement = (long) (Math.ceil(waitFactor * WAIT_INC));
currentWait = Math.min(retry.getMaxWait(), INITIAL_WAIT + waitIncrement);
EasyMock.expectLastCall();
}
EasyMock.replay(retry);
while (retry.canRetry()) {
retry.useRetry();
retry.waitForNextAttempt(log, "test backoff factor");
}
EasyMock.verify(retry);
}
// verifies the wait stops growing once it reaches the configured maxWait cap
@Test
public void testBoundedWaitIncrement() throws InterruptedException {
retry = EasyMock.createMockBuilder(Retry.class).addMockedMethod("sleep").createStrictMock();
retry.setMaxRetries(MAX_RETRIES);
retry.setStartWait(INITIAL_WAIT);
retry.setWaitIncrement(WAIT_INC);
// Make the last retry not increment in length
retry.setMaxWait((MAX_RETRIES - 1) * 1000);
retry.setBackOffFactor(1);
retry.setDoTimeJitter(false);
long currentWait = INITIAL_WAIT;
for (int i = 1; i <= MAX_RETRIES; i++) {
retry.sleep(currentWait);
EasyMock.expectLastCall();
if (i < MAX_RETRIES - 1) {
currentWait += WAIT_INC;
}
}
EasyMock.replay(retry);
while (retry.canRetry()) {
retry.useRetry();
retry.waitForNextAttempt(log, "test bounded wait increment");
}
EasyMock.verify(retry);
}
// infinite retries are represented by a max-retries value of -1
@Test
public void testIsMaxRetryDisabled() {
assertFalse(retry.hasInfiniteRetries());
assertTrue(unlimitedRetry.hasInfiniteRetries());
assertEquals(-1, unlimitedRetry.getMaxRetries());
}
// Integer.MAX_VALUE iterations stands in for "unlimited"; useRetry must never throw
@Test
public void testUnlimitedRetry() {
for (int i = 0; i < Integer.MAX_VALUE; i++) {
assertTrue(unlimitedRetry.canRetry());
unlimitedRetry.useRetry();
}
}
// runs logRetry in a tight loop for ~5 seconds; with a 1s log interval, roughly 5 (+/-1)
// warn-level messages are expected, plus a single initial debug message
@Test
public void testLogging() {
Logger testLogger = EasyMock.createMock(Logger.class);
EasyMock.expect(testLogger.isDebugEnabled()).andReturn(true);
testLogger.debug(EasyMock.anyObject(String.class));
EasyMock.expectLastCall().times(1);
EasyMock.expect(testLogger.isTraceEnabled()).andReturn(true).anyTimes();
testLogger.trace(EasyMock.anyObject(String.class));
EasyMock.expectLastCall().anyTimes();
testLogger.warn(EasyMock.anyObject(String.class));
EasyMock.expectLastCall().times(3, 5);
EasyMock.replay(testLogger);
// we want to do this for 5 seconds and observe the log messages
long start = System.currentTimeMillis();
long end = System.currentTimeMillis();
int i = 0;
for (; (end - start < 5000) && (i < Integer.MAX_VALUE); i++) {
unlimitedRetry.logRetry(testLogger, "failure message");
unlimitedRetry.useRetry();
end = System.currentTimeMillis();
}
// now observe what log messages we got which should be around 5 +- 1
EasyMock.verify(testLogger);
// sanity check that the loop actually spun many times within the window
assertTrue(i > 10);
}
// the builder accepts zero and positive retry counts but rejects negatives
@Test
public void testMaxRetries() {
NeedsRetries builder = Retry.builder();
builder.maxRetries(10);
builder.maxRetries(0);
assertThrows(IllegalArgumentException.class, () -> builder.maxRetries(-1),
"Should not allow negative retries");
}
// initial wait accepts zero and positive durations in any unit, rejects negatives
@Test
public void testInitialWait() {
NeedsRetryDelay builder = Retry.builder().maxRetries(10);
builder.retryAfter(10, NANOSECONDS);
builder.retryAfter(10, MILLISECONDS);
builder.retryAfter(10, DAYS);
builder.retryAfter(0, NANOSECONDS);
builder.retryAfter(0, MILLISECONDS);
builder.retryAfter(0, DAYS);
assertThrows(IllegalArgumentException.class, () -> builder.retryAfter(-1, NANOSECONDS),
"Should not allow negative wait times");
}
// wait increment accepts zero and positive durations, rejects negatives
@Test
public void testIncrementBy() {
NeedsTimeIncrement builder = Retry.builder().maxRetries(10).retryAfter(10, MILLISECONDS);
builder.incrementBy(10, DAYS);
builder.incrementBy(10, HOURS);
builder.incrementBy(10, NANOSECONDS);
builder.incrementBy(0, DAYS);
builder.incrementBy(0, HOURS);
builder.incrementBy(0, NANOSECONDS);
assertThrows(IllegalArgumentException.class, () -> builder.incrementBy(-1, NANOSECONDS),
"Should not allow negative increments");
}
// maxWait must be >= the initial wait (15 ms here)
@Test
public void testMaxWait() {
NeedsMaxWait builder =
Retry.builder().maxRetries(10).retryAfter(15, MILLISECONDS).incrementBy(10, MILLISECONDS);
builder.maxWait(15, MILLISECONDS);
builder.maxWait(16, MILLISECONDS);
assertThrows(IllegalArgumentException.class, () -> builder.maxWait(14, MILLISECONDS),
"Max wait time should be greater than or equal to initial wait time");
}
// log interval accepts zero and positive durations, rejects negatives
@Test
public void testLogInterval() {
NeedsLogInterval builder = Retry.builder().maxRetries(10).retryAfter(15, MILLISECONDS)
.incrementBy(10, MILLISECONDS).maxWait(16, MINUTES).backOffFactor(1);
builder.logInterval(10, DAYS);
builder.logInterval(10, HOURS);
builder.logInterval(10, NANOSECONDS);
builder.logInterval(0, DAYS);
builder.logInterval(0, HOURS);
builder.logInterval(0, NANOSECONDS);
assertThrows(IllegalArgumentException.class, () -> builder.logInterval(-1, NANOSECONDS),
"Log interval must not be negative");
}
// a Retry produced by a factory must reflect every value passed to the builder
@Test
public void properArgumentsInRetry() {
long maxRetries = 10, startWait = 50L, maxWait = 5000L, waitIncrement = 500L,
logInterval = 10000L;
RetryFactory factory = Retry.builder().maxRetries(maxRetries).retryAfter(startWait, MS)
.incrementBy(waitIncrement, MS).maxWait(maxWait, MS).backOffFactor(1)
.logInterval(logInterval, MS).createFactory();
Retry retry = factory.createRetry();
assertEquals(maxRetries, retry.getMaxRetries());
assertEquals(startWait, retry.getCurrentWait());
assertEquals(maxWait, retry.getMaxWait());
assertEquals(waitIncrement, retry.getWaitIncrement());
assertEquals(logInterval, retry.getLogInterval());
}
// same as above, but for the infinite-retries configuration (max retries reported as -1)
@Test
public void properArgumentsInUnlimitedRetry() {
long startWait = 50L, maxWait = 5000L, waitIncrement = 500L, logInterval = 10000L;
double waitFactor = 1.0;
RetryFactory factory = Retry.builder().infiniteRetries().retryAfter(startWait, MS)
.incrementBy(waitIncrement, MS).maxWait(maxWait, MS).backOffFactor(waitFactor)
.logInterval(logInterval, MS).createFactory();
Retry retry = factory.createRetry();
assertEquals(-1, retry.getMaxRetries());
assertEquals(startWait, retry.getCurrentWait());
assertEquals(maxWait, retry.getMaxWait());
assertEquals(waitIncrement, retry.getWaitIncrement());
assertEquals(logInterval, retry.getLogInterval());
}
// regression-style test: repeated waits with backoff on an infinite retry must not
// blow up (e.g. from a computed wait becoming invalid) even after many iterations
@Test
public void testInfiniteRetryWithBackoff() throws InterruptedException {
Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
.incrementBy(100, MILLISECONDS).maxWait(500, MILLISECONDS).backOffFactor(1.5)
.logInterval(3, MINUTES).createRetry();
for (int i = 0; i < 100; i++) {
try {
retry.waitForNextAttempt(log, i + "");
} catch (IllegalArgumentException e) {
log.error("Failed on iteration: {}", i);
throw e;
}
}
}
}
| 9,301 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/InternerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
/**
 * Tests for {@link Interner}, which deduplicates equal instances through a weak-reference
 * intern pool so that entries with no remaining strong references can be garbage collected.
 */
public class InternerTest {

  // Simple value type whose equals/hashCode depend only on its id, so two distinct
  // instances can be equal and therefore deduplicated by the interner. Declared static so
  // instances do not retain a hidden reference to the enclosing test object (a non-static
  // inner class would), which matters in a test that relies on garbage collection.
  private static class TestObj {
    private final int id;

    TestObj(int id) {
      this.id = id;
    }

    @Override
    public int hashCode() {
      return id;
    }

    @Override
    public boolean equals(Object obj) {
      if (obj instanceof TestObj) {
        return ((TestObj) obj).id == this.id;
      }
      return false;
    }
  }

  @Test
  public void testInternDedupes() {
    var interner = new Interner<TestObj>();
    var obj1 = new TestObj(1);
    var obj1_dupe = new TestObj(1);
    // sanity-check TestObj's equality semantics before exercising the interner
    assertSame(obj1, obj1);
    assertNotSame(obj1, obj1_dupe);
    assertEquals(obj1, obj1_dupe);
    assertEquals(obj1.hashCode(), obj1_dupe.hashCode());
    // verify object gets added to the intern pool
    assertSame(obj1, interner.intern(obj1));
    assertEquals(1, interner.size());
    // verify equivalent, but not the same object, gets deduplicated
    assertSame(obj1, interner.intern(obj1_dupe));
    assertEquals(1, interner.size());
    // verify second object grows the intern pool size
    var obj2 = new TestObj(2);
    assertNotSame(obj1, obj2);
    assertNotEquals(obj1, obj2);
    var intern2 = interner.intern(obj2);
    assertEquals(2, interner.size());
    // sanity check to ensure we got the same object back for obj2, and it's not mangled with obj1
    assertSame(obj2, intern2);
    assertNotEquals(obj1, intern2);
  }

  @Test
  @Timeout(20)
  public void testInternsGetGarbageCollected() {
    var interner = new Interner<TestObj>();
    assertEquals(0, interner.size()); // ensure empty
    // add one and keep a strong reference
    var obj1 = interner.intern(new TestObj(1));
    assertEquals(1, interner.size());
    // try to add a second, weakly referenced object until it sticks (may be GC'd between checks)
    do {
      interner.intern(new TestObj(2));
    } while (interner.size() != 2);
    // best effort to GC until the weakly reachable object goes away or until test times out
    do {
      System.gc();
    } while (interner.size() != 1);
    // ensure obj1 is still interned (because we kept a strong reference)
    assertSame(obj1, interner.intern(new TestObj(1)));
    // ensure second test object is entirely new (previous ones should have been GC'd)
    var obj2 = new TestObj(2);
    assertSame(obj2, interner.intern(obj2));
    assertEquals(2, interner.size());
  }
}
| 9,302 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/ValidatorsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import java.util.function.Consumer;
import org.apache.accumulo.core.clientImpl.Namespace;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.commons.lang3.StringUtils;
import org.junit.jupiter.api.Test;
/**
 * Tests for the stock {@link Validator} instances defined in {@link Validators}, which guard
 * table and namespace names/ids throughout the client API.
 */
public class ValidatorsTest {
// every validator is expected to reject null with a message ending in "must not be null"
private static <T> void checkNull(Consumer<T> nullConsumer) {
var e = assertThrows(IllegalArgumentException.class, () -> nullConsumer.accept(null));
assertTrue(e.getMessage().endsWith("must not be null"));
}
// asserts each item validates successfully and is returned by reference (no copying)
private static <T> void assertAllValidate(Validator<T> v, List<T> items) {
assertFalse(items.isEmpty(), "nothing to check");
items.forEach(item -> assertSame(item, v.validate(item)));
}
// asserts each item is rejected; the offending item becomes the assertion failure message
private static <T> void assertAllThrow(Validator<T> v, List<T> items) {
assertFalse(items.isEmpty(), "nothing to check");
items.forEach(item -> assertThrows(IllegalArgumentException.class, () -> v.validate(item),
String.valueOf(item)));
}
@Test
public void test_CAN_CLONE_TABLE() {
Validator<TableId> v = Validators.CAN_CLONE_TABLE;
checkNull(v::validate);
assertAllValidate(v, List.of(TableId.of("id1")));
// the built-in root and metadata tables may not be cloned
assertAllThrow(v, List.of(RootTable.ID, MetadataTable.ID));
}
@Test
public void test_EXISTING_NAMESPACE_NAME() {
Validator<String> v = Validators.EXISTING_NAMESPACE_NAME;
checkNull(v::validate);
// existing names are not length-capped (1025 chars accepted), unlike new names below
assertAllValidate(v, List.of(Namespace.DEFAULT.name(), Namespace.ACCUMULO.name(), "normalNs",
"withNumber2", "has_underscore", "_underscoreStart", StringUtils.repeat("a", 1025)));
assertAllThrow(v, List.of("has.dot", "has-dash", " hasSpace", ".", "has$dollar"));
}
@Test
public void test_EXISTING_TABLE_NAME() {
Validator<String> v = Validators.EXISTING_TABLE_NAME;
checkNull(v::validate);
// at most one dot (namespace.table) is allowed; each part follows the word-character rules
assertAllValidate(v,
List.of(RootTable.NAME, MetadataTable.NAME, "normalTable", "withNumber2", "has_underscore",
"_underscoreStart", StringUtils.repeat("a", 1025),
StringUtils.repeat("a", 1025) + "." + StringUtils.repeat("a", 1025)));
assertAllThrow(v, List.of("has-dash", "has-dash.inNamespace", "has.dash-inTable", " hasSpace",
".", "has$dollar", "two.dots.here", ".startsDot"));
}
@Test
public void test_NEW_NAMESPACE_NAME() {
Validator<String> v = Validators.NEW_NAMESPACE_NAME;
checkNull(v::validate);
// new namespace names are limited to 1024 characters
assertAllValidate(v, List.of(Namespace.DEFAULT.name(), Namespace.ACCUMULO.name(), "normalNs",
"withNumber2", "has_underscore", "_underscoreStart", StringUtils.repeat("a", 1024)));
assertAllThrow(v, List.of("has.dot", "has-dash", " hasSpace", ".", "has$dollar",
StringUtils.repeat("a", 1025)));
}
@Test
public void test_NEW_TABLE_NAME() {
Validator<String> v = Validators.NEW_TABLE_NAME;
checkNull(v::validate);
// only the table part (after the dot) is limited to 1024 characters for new tables;
// the namespace part may be longer since it already exists
assertAllValidate(v,
List.of(RootTable.NAME, MetadataTable.NAME, "normalTable", "withNumber2", "has_underscore",
"_underscoreStart", StringUtils.repeat("a", 1024),
StringUtils.repeat("a", 1025) + "." + StringUtils.repeat("a", 1024)));
assertAllThrow(v,
List.of("has-dash", "has-dash.inNamespace", "has.dash-inTable", " hasSpace", ".",
"has$dollar", "two.dots.here", ".startsDot", StringUtils.repeat("a", 1025),
StringUtils.repeat("a", 1025) + "." + StringUtils.repeat("a", 1025)));
}
@Test
public void test_NOT_BUILTIN_NAMESPACE() {
Validator<String> v = Validators.NOT_BUILTIN_NAMESPACE;
checkNull(v::validate);
assertAllValidate(v, List.of("root", "metadata", " .#!)(*$&^", " ")); // doesn't validate name
assertAllThrow(v, List.of(Namespace.DEFAULT.name(), Namespace.ACCUMULO.name()));
}
@Test
public void test_NOT_BUILTIN_TABLE() {
Validator<String> v = Validators.NOT_BUILTIN_TABLE;
checkNull(v::validate);
assertAllValidate(v, List.of("root", "metadata", "user", "ns1.table2"));
assertAllThrow(v, List.of(RootTable.NAME, MetadataTable.NAME));
}
@Test
public void test_NOT_METADATA_TABLE() {
Validator<String> v = Validators.NOT_METADATA_TABLE;
checkNull(v::validate);
assertAllValidate(v, List.of("root", "metadata", "user", "ns1.table2"));
// the root table counts as a metadata table here as well
assertAllThrow(v, List.of(RootTable.NAME, MetadataTable.NAME));
}
@Test
public void test_NOT_ROOT_TABLE_ID() {
Validator<TableId> v = Validators.NOT_ROOT_TABLE_ID;
checkNull(v::validate);
// only the root table id itself is rejected; ids are not otherwise validated by this one
assertAllValidate(v, List.of(TableId.of(""), MetadataTable.ID, TableId.of(" #0(U!$. ")));
assertAllThrow(v, List.of(RootTable.ID));
}
@Test
public void test_VALID_TABLE_ID() {
Validator<TableId> v = Validators.VALID_TABLE_ID;
checkNull(v::validate);
// NOTE(review): "C3P0" is rejected while "r2d2" passes, so valid ids appear to be
// lowercase-only — confirm against the pattern in Validators
assertAllValidate(v, List.of(RootTable.ID, MetadataTable.ID, TableId.of("111"),
TableId.of("aaaa"), TableId.of("r2d2")));
assertAllThrow(v, List.of(TableId.of(""), TableId.of("#0(U!$"), TableId.of(" #0(U!$. "),
TableId.of("."), TableId.of(" "), TableId.of("C3P0")));
}
@Test
public void test_sameNamespaceAs() {
checkNull(Validators::sameNamespaceAs);
// table in the default (unqualified) namespace: only unqualified names share its namespace
Validator<String> inDefaultNS = Validators.sameNamespaceAs("tableInDefaultNamespace");
checkNull(inDefaultNS::validate);
assertAllValidate(inDefaultNS, List.of("t1"));
assertAllThrow(inDefaultNS, List.of("accumulo.other", "other.t2", ".", "other.", ".malformed"));
// table in namespace "other": only names qualified with "other." share its namespace
Validator<String> inOtherNS = Validators.sameNamespaceAs("other.tableInOtherNamespace");
checkNull(inOtherNS::validate);
assertAllValidate(inOtherNS, List.of("other.t1", "other.t2"));
assertAllThrow(inOtherNS, List.of("other.", "other", "else.t3"));
}
}
| 9,303 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/ThriftMessageUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.accumulo.core.securityImpl.thrift.TAuthenticationTokenIdentifier;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Tests round-trip serialization of Thrift messages via {@link ThriftMessageUtil}, both for a
 * plain byte array and for a message embedded at an offset within a larger array.
 */
public class ThriftMessageUtilTest {

  // junk bytes written before and after the real payload in the offset-based test; using a
  // single named constant replaces the previous magic number (14) and seven per-element
  // assignments, so the padding size and its contents can never drift apart
  private static final byte[] GARBAGE = {'G', 'A', 'R', 'B', 'A', 'G', 'E'};

  private TAuthenticationTokenIdentifier msg;
  private ThriftMessageUtil util;

  @BeforeEach
  public void setup() throws IOException {
    msg = new TAuthenticationTokenIdentifier("principal");
    util = new ThriftMessageUtil();
  }

  @Test
  public void testSerializationAsByteArray() throws IOException {
    ByteBuffer buff = util.serialize(msg);
    TAuthenticationTokenIdentifier copy = new TAuthenticationTokenIdentifier();
    // copy exactly the serialized bytes (buffer capacity may exceed its limit)
    byte[] array = new byte[buff.limit()];
    System.arraycopy(buff.array(), 0, array, 0, buff.limit());
    util.deserialize(array, copy);
    assertEquals(msg, copy);
  }

  @Test
  public void testSerializationAsByteArrayWithLimits() throws IOException {
    ByteBuffer buff = util.serialize(msg);
    TAuthenticationTokenIdentifier copy = new TAuthenticationTokenIdentifier();
    // Surround the serialized message with garbage on both sides; deserialize must honor the
    // provided offset and length rather than assuming the message spans the whole array
    byte[] array = new byte[buff.limit() + 2 * GARBAGE.length];
    System.arraycopy(GARBAGE, 0, array, 0, GARBAGE.length);
    System.arraycopy(buff.array(), 0, array, GARBAGE.length, buff.limit());
    System.arraycopy(GARBAGE, 0, array, GARBAGE.length + buff.limit(), GARBAGE.length);
    util.deserialize(array, GARBAGE.length, buff.limit(), copy);
    assertEquals(msg, copy);
  }
}
| 9,304 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/PartitionerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.util.LocalityGroupUtil.Partitioner;
import org.apache.commons.lang3.mutable.MutableLong;
import org.junit.jupiter.api.Test;
/**
 * Tests {@link Partitioner}: mutations whose column updates span multiple locality groups are
 * split so that each output partition holds only the updates belonging to its group, with a
 * final catch-all partition for column families not in any configured group.
 */
public class PartitionerTest {

  @Test
  public void test1() {
    // two locality groups: {cf1, cf2} and {cf3}; anything else lands in the extra partition
    PreAllocatedArray<Map<ByteSequence,MutableLong>> groups = new PreAllocatedArray<>(2);
    groups.set(0, new HashMap<>());
    groups.get(0).put(new ArrayByteSequence("cf1"), new MutableLong(1));
    groups.get(0).put(new ArrayByteSequence("cf2"), new MutableLong(1));
    groups.set(1, new HashMap<>());
    groups.get(1).put(new ArrayByteSequence("cf3"), new MutableLong(1));
    Partitioner partitioner = new Partitioner(groups);

    // input mutations, some of which mix families from different groups
    Mutation row1 = new Mutation("r1");
    row1.put("cf1", "cq1", "v1");
    Mutation row2 = new Mutation("r2");
    row2.put("cf1", "cq1", "v2");
    row2.put("cf2", "cq2", "v3");
    Mutation row3 = new Mutation("r3");
    row3.put("cf1", "cq1", "v4");
    row3.put("cf3", "cq2", "v5");
    Mutation row4 = new Mutation("r4");
    row4.put("cf1", "cq1", "v6");
    row4.put("cf3", "cq2", "v7");
    row4.put("cf5", "cq3", "v8");
    Mutation row5 = new Mutation("r5");
    row5.put("cf5", "cq3", "v9");

    // one output list per group, plus one for unconfigured families
    PreAllocatedArray<List<Mutation>> partitioned = new PreAllocatedArray<>(3);
    for (int i = 0; i < partitioned.length; i++) {
      partitioned.set(i, new ArrayList<>());
    }
    partitioner.partition(Arrays.asList(row1, row2, row3, row4, row5), partitioned);

    // group 0 should receive only the cf1/cf2 updates
    Mutation exp1 = new Mutation("r1");
    exp1.put("cf1", "cq1", "v1");
    Mutation exp2 = new Mutation("r2");
    exp2.put("cf1", "cq1", "v2");
    exp2.put("cf2", "cq2", "v3");
    Mutation exp3 = new Mutation("r3");
    exp3.put("cf1", "cq1", "v4");
    Mutation exp4 = new Mutation("r4");
    exp4.put("cf1", "cq1", "v6");
    assertEquals(toKeySet(exp1, exp2, exp3, exp4), toKeySet(partitioned.get(0)));

    // group 1 should receive only the cf3 updates
    Mutation exp5 = new Mutation("r3");
    exp5.put("cf3", "cq2", "v5");
    Mutation exp6 = new Mutation("r4");
    exp6.put("cf3", "cq2", "v7");
    assertEquals(toKeySet(exp5, exp6), toKeySet(partitioned.get(1)));

    // the catch-all partition receives the cf5 updates (row5 is entirely cf5)
    Mutation exp7 = new Mutation("r4");
    exp7.put("cf5", "cq3", "v8");
    assertEquals(toKeySet(exp7, row5), toKeySet(partitioned.get(2)));
  }

  // flattens a list of mutations into the set of keys they would produce
  private Set<Key> toKeySet(List<Mutation> mutations) {
    return toKeySet(mutations.toArray(new Mutation[0]));
  }

  // flattens each mutation's column updates into Key objects for order-insensitive comparison
  private Set<Key> toKeySet(Mutation... mutations) {
    Set<Key> keys = new HashSet<>();
    for (Mutation m : mutations) {
      for (ColumnUpdate update : m.getUpdates()) {
        keys.add(new Key(m.getRow(), update.getColumnFamily(), update.getColumnQualifier(),
            update.getColumnVisibility(), update.getTimestamp()));
      }
    }
    return keys;
  }
}
| 9,305 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/HostAndPortComparatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.junit.jupiter.api.Test;
import com.google.common.net.HostAndPort;
class HostAndPortComparatorTest {

  private static final Comparator<HostAndPort> COMPARATOR = new HostAndPortComparator();

  /**
   * Exercises pairwise comparisons. Based on the assertions below: host names compare against each
   * other, IP addresses sort before host names, a present port sorts after an absent one for the
   * same host, and a null right-hand argument sorts before any non-null value.
   */
  @Test
  void testCompare() {
    HostAndPort hostAndPort1 = HostAndPort.fromString("example.info");
    HostAndPort hostAndPort2 = HostAndPort.fromString("example.com");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) > 0);

    // equal inputs must compare as 0; assertEquals gives a useful failure message,
    // unlike the original assertTrue(... == 0)
    HostAndPort hostPortSame = HostAndPort.fromString("www.test.com");
    assertEquals(0, COMPARATOR.compare(hostPortSame, hostPortSame));

    hostAndPort1 = HostAndPort.fromString("www.example.com");
    hostAndPort2 = HostAndPort.fromString("www.example.com");
    assertEquals(0, COMPARATOR.compare(hostAndPort1, hostAndPort2));

    // a specified port sorts after no port for the same host
    hostAndPort1 = HostAndPort.fromString("192.0.2.1:80");
    hostAndPort2 = HostAndPort.fromString("192.0.2.1");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) > 0);

    hostAndPort1 = HostAndPort.fromString("[2001:db8::1]");
    hostAndPort2 = HostAndPort.fromString("[2001:db9::1]");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) < 0);

    hostAndPort1 = HostAndPort.fromString("2001:db8:3333:4444:5555:6676:7777:8888");
    hostAndPort2 = HostAndPort.fromString("2001:db8:3333:4444:5555:6666:7777:8888");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) > 0);

    hostAndPort1 = HostAndPort.fromString("192.0.2.1:80");
    hostAndPort2 = HostAndPort.fromString("192.1.2.1");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) < 0);

    hostAndPort1 = HostAndPort.fromString("12.1.2.1");
    hostAndPort2 = HostAndPort.fromString("192.1.2.1");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) < 0);

    // host names sort after IP addresses
    hostAndPort1 = HostAndPort.fromString("wwww.example.com");
    hostAndPort2 = HostAndPort.fromString("192.1.2.1");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) > 0);

    hostAndPort1 = HostAndPort.fromString("2001:db8::1");
    hostAndPort2 = HostAndPort.fromString("2001:db9::1");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) < 0);

    // the empty host sorts before anything else, in both directions
    hostAndPort1 = HostAndPort.fromString("");
    hostAndPort2 = HostAndPort.fromString("2001:db9::1");
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) < 0);
    assertTrue(COMPARATOR.compare(hostAndPort2, hostAndPort1) > 0);

    // a null right-hand side sorts before any non-null value
    hostAndPort1 = HostAndPort.fromString("2001:db8::1");
    hostAndPort2 = null;
    assertTrue(COMPARATOR.compare(hostAndPort1, hostAndPort2) > 0);
  }

  /**
   * Verifies the total ordering produced over a mixed collection of IPv4/IPv6 addresses and host
   * names, with and without ports. The whole expected and actual lists are compared (the original
   * compared element-by-element only up to the expected size, which would silently ignore
   * unexpected extra elements in the sorted set and throw an index error if the set were smaller).
   */
  @Test
  void testOrder() {
    Set<HostAndPort> hostPortSet = Stream
        .of("example.info", "192.12.2.1:80", "example.com:80", "a.bb.c.d", "12.1.2.1",
            "localhost:0000090", "example.com", "100.100.100.100", "www.example.com",
            "[2001:eb8::1]", "localhost:90", "[2001:eb8::1]:80", "2001:db8::1", "100.100.101.100",
            "2001:::1", "192.12.2.1", "192.12.2.1:81", "199.10.1.1:14", "10.100.100.100",
            "2.2.2.2:10000", "192.12.2.1:79", "1.1.1.1:24", "1.1.1.1", "192.12.2.1:79", "a.b.c.d",
            "1.100.100.100", "2.2.2.2:9999", "a.b.b.d", "www.example.com", "www.alpha.org",
            "a.b.c.d:10", "a.b.b.d:10", "a.b.b.d:11")
        .map(HostAndPort::fromString)
        .collect(Collectors.toCollection(() -> new TreeSet<>(COMPARATOR)));
    // duplicates of entries already present ("localhost:90" above is "localhost:0000090");
    // the TreeSet must deduplicate them under the comparator
    hostPortSet.add(HostAndPort.fromParts("localhost", 1));
    hostPortSet.add(HostAndPort.fromParts("localhost", 000001));

    List<String> expected = Stream
        .of("1.1.1.1", "1.1.1.1:24", "1.100.100.100", "10.100.100.100", "100.100.100.100",
            "100.100.101.100", "12.1.2.1", "192.12.2.1", "192.12.2.1:79", "192.12.2.1:80",
            "192.12.2.1:81", "199.10.1.1:14", "2.2.2.2:9999", "2.2.2.2:10000", "[2001:::1]",
            "[2001:db8::1]", "[2001:eb8::1]", "[2001:eb8::1]:80", "a.b.b.d", "a.b.b.d:10",
            "a.b.b.d:11", "a.b.c.d", "a.b.c.d:10", "a.bb.c.d", "example.com", "example.com:80",
            "example.info", "localhost:1", "localhost:90", "www.alpha.org", "www.example.com")
        .map(HostAndPort::fromString).map(HostAndPort::toString).collect(Collectors.toList());
    List<String> actual =
        hostPortSet.stream().map(HostAndPort::toString).collect(Collectors.toList());

    // comparing whole lists catches size mismatches as well as ordering errors
    assertEquals(expected, actual);
  }
}
| 9,306 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/ByteBufferUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class ByteBufferUtilTest {

  /**
   * Asserts that every ByteBufferUtil conversion of {@code bb} yields {@code expected}: toText,
   * toBytes, toString, toBytesList, ArrayByteSequence construction, write(DataOutput,...), and
   * toByteArrayInputStream. Does not consume {@code bb}; its position is unchanged on return.
   */
  private static void assertEqualsBB(String expected, ByteBuffer bb) {
    assertEquals(new Text(expected), ByteBufferUtil.toText(bb));
    assertEquals(expected, new String(ByteBufferUtil.toBytes(bb), UTF_8));
    assertEquals(expected, ByteBufferUtil.toString(bb));

    List<byte[]> bal = ByteBufferUtil.toBytesList(Collections.singletonList(bb));
    assertEquals(1, bal.size());
    assertEquals(expected, new String(bal.get(0), UTF_8));

    assertEquals(new ArrayByteSequence(expected), new ArrayByteSequence(bb));

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // try-with-resources instead of a manual close() inside the try block
    try (DataOutputStream dos = new DataOutputStream(baos)) {
      ByteBufferUtil.write(dos, bb);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
    assertEquals(expected, new String(baos.toByteArray(), UTF_8));

    ByteArrayInputStream bais = ByteBufferUtil.toByteArrayInputStream(bb);
    byte[] buffer = new byte[expected.length()];
    try {
      // verify the number of bytes read; the original ignored the return value of read(),
      // which would silently accept a short read
      int numRead = bais.read(buffer);
      assertEquals(expected.isEmpty() ? 0 : expected.length(), numRead);
      assertEquals(expected, new String(buffer, UTF_8));
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  @Test
  public void testNonZeroArrayOffset() {
    byte[] data = "0123456789".getBytes(UTF_8);
    ByteBuffer bb1 = ByteBuffer.wrap(data, 3, 4);
    // create a ByteBuffer with a non-zero array offset
    ByteBuffer bb2 = bb1.slice();
    // The purpose of this test is to ensure ByteBufferUtil code works when arrayOffset is non-zero.
    // The following asserts are not to test ByteBuffer, but
    // ensure the behavior of slice() is as expected.
    assertEquals(3, bb2.arrayOffset());
    assertEquals(0, bb2.position());
    assertEquals(4, bb2.limit());
    // start test with non zero arrayOffset
    assertEqualsBB("3456", bb2);
    // read one byte from byte buffer... this should cause position to be non-zero in addition to
    // array offset
    bb2.get();
    assertEqualsBB("456", bb2);
  }

  @Test
  public void testZeroArrayOffsetAndNonZeroPosition() {
    byte[] data = "0123456789".getBytes(UTF_8);
    ByteBuffer bb1 = ByteBuffer.wrap(data, 3, 4);
    assertEqualsBB("3456", bb1);
  }

  @Test
  public void testZeroArrayOffsetAndPosition() {
    byte[] data = "0123456789".getBytes(UTF_8);
    ByteBuffer bb1 = ByteBuffer.wrap(data, 0, 4);
    assertEqualsBB("0123", bb1);
  }

  @Test
  public void testDirectByteBuffer() {
    // allocate direct so it does not have a backing array
    ByteBuffer bb = ByteBuffer.allocateDirect(10);
    bb.put("0123456789".getBytes(UTF_8));
    bb.rewind();
    assertEqualsBB("0123456789", bb);
    // advance byte buffer position
    bb.get();
    assertEqualsBB("123456789", bb);
  }
}
| 9,307 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/ConfigurationImplTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import java.util.Map;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.junit.jupiter.api.Test;
public class ConfigurationImplTest {

  /**
   * Verifies that ConfigurationImpl exposes table-scoped and general-scoped custom properties
   * separately, and that later changes to the underlying configuration are visible through it.
   */
  @Test
  public void testCustomProps() {
    // seed the underlying configuration with two properties in each custom scope
    ConfigurationCopy underlying = new ConfigurationCopy();
    underlying.set("table.custom.p1", "v1");
    underlying.set("table.custom.p2", "v2");
    underlying.set("general.custom.p3", "v3");
    underlying.set("general.custom.p4", "v4");

    var wrapper = new ConfigurationImpl(underlying);

    // each scope exposes only its own properties
    assertEquals(Map.of("p3", "v3", "p4", "v4"), wrapper.getCustom());
    assertEquals(Map.of("p1", "v1", "p2", "v2"), wrapper.getTableCustom());
    assertEquals("v3", wrapper.getCustom("p3"));
    assertEquals("v1", wrapper.getTableCustom("p1"));
    assertNull(wrapper.getCustom("p1"));
    assertNull(wrapper.getTableCustom("p3"));

    // mutate the underlying config and confirm the wrapper reflects the changes
    underlying.set("general.custom.p4", "v5");
    underlying.set("table.custom.p2", "v6");
    underlying.set("table.custom.p5", "v7");

    assertEquals(Map.of("p3", "v3", "p4", "v5"), wrapper.getCustom());
    assertEquals(Map.of("p1", "v1", "p2", "v6", "p5", "v7"), wrapper.getTableCustom());
    assertEquals("v3", wrapper.getCustom("p3"));
    assertEquals("v5", wrapper.getCustom("p4"));
    assertEquals("v1", wrapper.getTableCustom("p1"));
    assertEquals("v6", wrapper.getTableCustom("p2"));
    assertNull(wrapper.getCustom("p5"));
    assertNull(wrapper.getTableCustom("p4"));
  }
}
| 9,308 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/OpTimerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Exercise basic timer (org.apache.hadoop.util.StopWatch) functionality. Current usage requires
* ability to reset timer.
*/
/**
 * Exercise basic timer (org.apache.hadoop.util.StopWatch) functionality. Current usage requires
 * ability to reset timer.
 */
public class OpTimerTest {

  private static final Logger log = LoggerFactory.getLogger(OpTimerTest.class);

  /**
   * Validate reset functionality
   */
  @Test
  public void verifyReset() throws InterruptedException {
    OpTimer sw = new OpTimer().start();
    Thread.sleep(50);
    sw.stop();

    long elapsedBefore = sw.now();
    log.debug("Time value before reset {}", String.format("%.3f ms", sw.scale(MILLISECONDS)));

    // restart after reset; the new elapsed time should be non-zero but smaller than before
    sw.reset().start();
    Thread.sleep(1);
    sw.stop();
    assertTrue(sw.now() > 0);
    assertTrue(elapsedBefore > sw.now());

    // a reset timer reports zero elapsed time
    sw.reset();
    log.debug("Time value after reset {}", String.format("%.3f ms", sw.scale(MILLISECONDS)));
    assertEquals(0, sw.now());
  }

  /**
   * Verify that IllegalStateException is thrown when calling stop when timer has not been started.
   */
  @Test
  public void verifyExceptionCallingStopWhenNotStarted() {
    OpTimer sw = new OpTimer();
    assertFalse(sw.isRunning());
    // stopping a timer that was never started is an error
    assertThrows(IllegalStateException.class, sw::stop,
        "Should not be able to call stop on a timer that is not running");
  }

  /**
   * Verify that IllegalStateException is thrown when calling start on running timer.
   */
  @Test
  public void verifyExceptionCallingStartWhenRunning() throws InterruptedException {
    OpTimer sw = new OpTimer().start();
    Thread.sleep(50);
    assertTrue(sw.isRunning());
    // starting an already-running timer is an error
    assertThrows(IllegalStateException.class, sw::start,
        "Should not be able to call start on a timer that is already running");
  }

  /**
   * Verify that IllegalStateException is thrown when calling stop when not running.
   */
  @Test
  public void verifyExceptionCallingStopWhenNotRunning() throws InterruptedException {
    OpTimer sw = new OpTimer().start();
    Thread.sleep(50);
    assertTrue(sw.isRunning());

    sw.stop();
    assertFalse(sw.isRunning());
    // a second stop without an intervening start is an error
    assertThrows(IllegalStateException.class, sw::stop,
        "Should not be able to call stop on a timer that is not running");
  }

  /**
   * Validate that start / stop accumulates time.
   */
  @Test
  public void verifyElapsed() throws InterruptedException {
    OpTimer sw = new OpTimer().start();
    Thread.sleep(50);
    sw.stop();

    long firstElapsed = sw.now();
    log.debug("Time value after first stop {}",
        String.format("%.3f ms", sw.scale(MILLISECONDS)));

    // restarting without reset must add to the previously accumulated time
    sw.start();
    Thread.sleep(10);
    sw.stop();
    log.debug("Time value after second stop {}",
        String.format("%.3f ms", sw.scale(MILLISECONDS)));

    assertTrue(firstElapsed < sw.now(), "The timer did not increase in value over time");
  }

  /**
   * Validate that scale returns correct values.
   */
  @Test
  public void scale() throws InterruptedException {
    OpTimer sw = new OpTimer().start();
    Thread.sleep(50);
    sw.stop();

    long elapsedNanos = sw.now();

    // compare scale() against a manual conversion of the raw nanosecond count
    double nanosPerMillisecond = 1_000_000.0;
    assertEquals(elapsedNanos / nanosPerMillisecond, sw.scale(MILLISECONDS), 0.00000001);

    double nanosPerSecond = 1_000_000_000.0;
    assertEquals(elapsedNanos / nanosPerSecond, sw.scale(SECONDS), 0.00000001);
  }
}
| 9,309 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/PairTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertNull;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Map.Entry;
import org.junit.jupiter.api.Test;
public class PairTest {

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#hashCode()}.
   */
  @Test
  public void testHashMethod() {
    Pair<Integer,String> pair1 = new Pair<>(25, "twenty-five");
    Pair<Integer,String> pair2 = new Pair<>(25, "twenty-five");
    Pair<Integer,String> pair3 = new Pair<>(null, null);
    Pair<Integer,String> pair4 = new Pair<>(25, "twentyfive");
    Pair<Integer,String> pair5 = new Pair<>(225, "twenty-five");

    // distinct but equal instances must share a hash code
    assertNotSame(pair1, pair2);
    assertEquals(pair1.hashCode(), pair2.hashCode());
    assertNotSame(pair2, pair3);
    // pairs differing in either element should (for these values) hash differently
    assertNotEquals(pair1.hashCode(), pair4.hashCode());
    assertNotEquals(pair1.hashCode(), pair5.hashCode());
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#equals(java.lang.Object)}.
   * Checks the reflexive and symmetric properties of the equals contract plus null handling.
   * (The original test repeated the same three assertions twice; the duplicates were removed,
   * and the reverse-direction inequality check was added.)
   */
  @Test
  public void testEqualsObject() {
    Pair<Integer,String> pair1 = new Pair<>(25, "twenty-five");
    Pair<Integer,String> pair2 = new Pair<>(25, "twenty-five");
    Pair<Integer,String> pair3 = new Pair<>(25, "twentyfive");
    Pair<Integer,String> null1 = null;

    // reflexive
    assertEquals(pair1, pair1);
    // symmetric
    assertEquals(pair1, pair2);
    assertEquals(pair2, pair1);
    // unequal second element, in both directions
    assertNotEquals(pair1, pair3);
    assertNotEquals(pair3, pair1);
    // null handling: a pair is never equal to null
    assertNull(null1);
    assertNotEquals(pair1, null1);
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#getFirst()}.
   */
  @Test
  public void testGetFirst() {
    Pair<Integer,String> pair = new Pair<>(25, "twenty-five");
    assertEquals((Integer) 25, pair.getFirst());
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#getSecond()}.
   */
  @Test
  public void testGetSecond() {
    Pair<Integer,String> pair = new Pair<>(25, "twenty-five");
    assertEquals("twenty-five", pair.getSecond());
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#toString()}.
   */
  @Test
  public void testToString() {
    Pair<Integer,String> pair = new Pair<>(25, "twenty-five");
    assertEquals("(25,twenty-five)", pair.toString());
  }

  /**
   * Test method for
   * {@link org.apache.accumulo.core.util.Pair#toString(java.lang.String, java.lang.String, java.lang.String)}.
   */
  @Test
  public void testToStringStringStringString() {
    Pair<Integer,String> pair = new Pair<>(25, "twenty-five");
    assertEquals("---25~~~twenty-five+++", pair.toString("---", "~~~", "+++"));
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#toMapEntry()}.
   */
  @Test
  public void testToMapEntry() {
    Pair<Integer,String> pair = new Pair<>(10, "IO");
    Entry<Integer,String> entry = pair.toMapEntry();
    assertEquals(pair.getFirst(), entry.getKey());
    assertEquals(pair.getSecond(), entry.getValue());
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#swap()}.
   */
  @Test
  public void testSwap() {
    Pair<Integer,String> pair = new Pair<>(25, "twenty-five");
    // swapping twice yields the original pair
    assertEquals(pair, pair.swap().swap());
    Pair<String,Integer> pair2 = new Pair<>("twenty-five", 25);
    assertEquals(pair, pair2.swap());
    assertEquals(pair2, pair.swap());
  }

  /**
   * Test method for {@link org.apache.accumulo.core.util.Pair#fromEntry(java.util.Map.Entry)}.
   */
  @Test
  public void testFromEntry() {
    Entry<Integer,String> entry = new SimpleImmutableEntry<>(10, "IO");

    Pair<Integer,String> pair0 = Pair.fromEntry(entry);
    assertEquals(entry.getKey(), pair0.getFirst());
    assertEquals(entry.getValue(), pair0.getSecond());

    // fromEntry may also widen the type parameters of the resulting Pair
    Pair<Object,Object> pair = Pair.fromEntry(entry);
    assertEquals(entry.getKey(), pair.getFirst());
    assertEquals(entry.getValue(), pair.getSecond());

    Pair<Number,CharSequence> pair2 = Pair.fromEntry(entry);
    assertEquals(entry.getKey(), pair2.getFirst());
    assertEquals(entry.getValue(), pair2.getSecond());
  }
}
| 9,310 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/AddressUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import java.security.Security;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Test the AddressUtil class.
*/
public class AddressUtilTest {

  private static final Logger log = LoggerFactory.getLogger(AddressUtilTest.class);

  @Test
  public void testGetNegativeTtl() {
    log.info("Checking that we can get the ttl on dns failures.");
    // the expected TTL starts at 20 and is repeatedly revised below depending on what the
    // runtime environment actually allows; expectException flips when the JVM is configured
    // to cache DNS failures forever (ttl == -1)
    int expectedTtl = 20;
    boolean expectException = false;
    /* TODO ACCUMULO-2242 replace all of this with Powermock on the Security class */
    // attempt to pin the negative-DNS-cache TTL to a known value; a SecurityException here
    // means we can only observe whatever value the system already has
    try {
      Security.setProperty("networkaddress.cache.negative.ttl", Integer.toString(expectedTtl));
    } catch (SecurityException exception) {
      log.warn(
          "We can't set the DNS cache period, so we're only testing fetching the system value.");
      expectedTtl = 10;
    }
    // read the effective value back; each failure mode falls back to 10, which is the
    // documented default used by AddressUtil when the property is unusable
    try {
      expectedTtl = Integer.parseInt(Security.getProperty("networkaddress.cache.negative.ttl"));
    } catch (SecurityException exception) {
      log.debug("Security manager won't let us fetch the property, testing default path.");
      expectedTtl = 10;
    } catch (NumberFormatException exception) {
      log.debug("property isn't a number, testing default path.");
      expectedTtl = 10;
    }
    // -1 means "cache failures forever"; AddressUtil is expected to throw in that case
    if (-1 == expectedTtl) {
      log.debug("property is set to 'forever', testing exception path");
      expectException = true;
    }
    // any other negative value is treated as the default path
    if (0 > expectedTtl) {
      log.debug("property is a negative value other than 'forever', testing default path.");
      expectedTtl = 10;
    }
    try {
      if (expectException) {
        log.info("AddressUtil is (hopefully) going to spit out an error about DNS lookups. "
            + "you can ignore it.");
      }
      int result = AddressUtil.getAddressCacheNegativeTtl(null);
      if (expectException) {
        fail("The JVM Security settings cache DNS failures forever. "
            + "In this case we expect an exception but didn't get one.");
      }
      assertEquals(expectedTtl, result, "Didn't get the ttl we expected");
    } catch (IllegalArgumentException exception) {
      // the exception is only acceptable on the "cache forever" path
      if (!expectException) {
        log.error("Got an exception when we weren't expecting.", exception);
        fail("We only expect to throw an IllegalArgumentException when the JVM "
            + "caches DNS failures forever.");
      }
    }
  }

  @Test
  public void testGetNegativeTtlThrowsOnForever() {
    log.info("When DNS is cached forever, we should throw.");
    /* TODO ACCUMULO-2242 replace all of this with Powermock on the Security class */
    // force the "cache forever" configuration; if a security manager prevents this, the
    // test cannot exercise the path and bails out early
    try {
      Security.setProperty("networkaddress.cache.negative.ttl", "-1");
    } catch (SecurityException exception) {
      log.error("We can't set the DNS cache period, so this test is effectively ignored.");
      return;
    }
    log.info("AddressUtil is (hopefully) going to spit out an error about DNS lookups. "
        + "you can ignore it.");
    assertThrows(IllegalArgumentException.class, () -> AddressUtil.getAddressCacheNegativeTtl(null),
        "The JVM Security settings cache DNS failures forever, this should cause an exception.");
  }
}
| 9,311 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/StatTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class StatTest {

  // tolerance for floating point comparisons
  static double delta = 0.0000001;

  private Stat zero;
  private Stat stat;

  @BeforeEach
  public void setUp() {
    // a Stat holding the single value 0
    zero = new Stat();
    zero.addStat(0);

    // a Stat holding a small sample of values; the mean and sd for this set were
    // checked against wolfram alpha
    stat = new Stat();
    for (Long sample : new long[] {9792, 5933, 4766, 5770, 3763, 3677, 5002}) {
      stat.addStat(sample);
    }
  }

  @Test
  public void testGetMin() {
    assertEquals(0, zero.min());
    assertEquals(3677, stat.min());
  }

  @Test
  public void testGetMax() {
    assertEquals(0, zero.max());
    assertEquals(9792, stat.max());
  }

  @Test
  public void testGetAverage() {
    assertEquals(0, zero.mean(), delta);
    assertEquals(5529, stat.mean(), delta);
  }

  @Test
  public void testGetSum() {
    assertEquals(0, zero.sum());
    assertEquals(38703, stat.sum());
  }

  @Test
  public void testClear() {
    // after clear(), both instances behave like freshly constructed Stats
    zero.clear();
    stat.clear();

    assertEquals(0, zero.max());
    assertEquals(zero.max(), stat.max());
    assertEquals(0, zero.min());
    assertEquals(zero.min(), stat.min());
    assertEquals(0, zero.sum());
    assertEquals(zero.sum(), stat.sum());

    // the mean of an empty Stat is NaN
    assertEquals(Double.NaN, zero.mean(), 0);
    assertEquals(zero.mean(), stat.mean(), 0);
  }
}
| 9,312 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/LocalityGroupUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class LocalityGroupUtilTest {

  /**
   * Verifies that locality groups parse from table properties, and that a column family
   * appearing in two enabled groups is rejected.
   */
  @Test
  public void testColumnFamilySet() {

    ConfigurationCopy tableConf = new ConfigurationCopy();
    tableConf.set("table.group.lg1", "cf1,cf2");
    tableConf.set("table.groups.enabled", "lg1");

    try {
      Map<String,Set<ByteSequence>> lgMap = LocalityGroupUtil.getLocalityGroups(tableConf);
      assertEquals(1, lgMap.size());
      assertNotNull(lgMap.get("lg1"));
      assertEquals(2, lgMap.get("lg1").size());
      assertTrue(lgMap.get("lg1").contains(new ArrayByteSequence("cf1")));
    } catch (LocalityGroupConfigurationError err) {
      fail();
    }

    // cf1 now appears in both lg1 and lg2, which is a configuration error
    tableConf.set("table.group.lg2", "cf1");
    tableConf.set("table.groups.enabled", "lg1,lg2");
    assertThrows(LocalityGroupConfigurationError.class,
        () -> LocalityGroupUtil.getLocalityGroups(tableConf));
  }

  /**
   * Verifies that binary column family names survive encode/decode round trips, both singly
   * and as sets.
   */
  @Test
  public void testEncoding() throws Exception {
    // two 256-byte arrays together covering every possible byte value
    byte[] ascending = new byte[256];
    byte[] descending = new byte[256];
    for (int i = 0; i < 256; i++) {
      ascending[i] = (byte) (0xff & i);
      descending[i] = (byte) (0xff & (255 - i));
    }

    // single binary column family round trip
    ArrayByteSequence original = new ArrayByteSequence(ascending);
    String encoded = LocalityGroupUtil.encodeColumnFamily(original);
    ByteSequence decoded = LocalityGroupUtil.decodeColumnFamily(encoded);
    assertEquals(original, decoded);
    assertEquals(encoded, LocalityGroupUtil.encodeColumnFamily(decoded));

    // multiple binary column families round trip as a set
    HashSet<Text> families = new HashSet<>();
    HashSet<ByteSequence> expected = new HashSet<>();
    families.add(new Text(ascending));
    expected.add(new ArrayByteSequence(ascending));
    families.add(new Text(descending));
    expected.add(new ArrayByteSequence(descending));

    Set<ByteSequence> actual =
        LocalityGroupUtil.decodeColumnFamilies(LocalityGroupUtil.encodeColumnFamilies(families));
    assertEquals(expected, actual);
  }
}
| 9,313 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/PreAllocatedArrayTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Iterator;
import org.junit.jupiter.api.Test;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT",
    justification = "lambda assertThrows testing exception thrown")
public class PreAllocatedArrayTest {

  /**
   * Test method for {@link org.apache.accumulo.core.util.PreAllocatedArray#PreAllocatedArray(int)}.
   */
  @Test
  public void testPreAllocatedArray() {
    // the length field reflects the requested capacity, including zero
    PreAllocatedArray<String> array = new PreAllocatedArray<>(5);
    assertEquals(5, array.length);

    array = new PreAllocatedArray<>(3);
    assertEquals(3, array.length);

    array = new PreAllocatedArray<>(0);
    assertEquals(0, array.length);
  }

  @Test
  public void testPreAllocatedArray_Fail() {
    // a negative capacity is rejected at construction time
    assertThrows(IllegalArgumentException.class, () -> new PreAllocatedArray<String>(-5));
  }

  /**
   * Test method for
   * {@link org.apache.accumulo.core.util.PreAllocatedArray#set(int, java.lang.Object)}.<br>
   * Test method for {@link org.apache.accumulo.core.util.PreAllocatedArray#get(int)}.<br>
   * Test method for {@link org.apache.accumulo.core.util.PreAllocatedArray#iterator()}.
   */
  @Test
  public void testSet() {
    final int capacity = 5;
    PreAllocatedArray<String> array = new PreAllocatedArray<>(capacity);
    assertEquals(capacity, array.length);

    // populate two slots; all others remain null
    array.set(1, "a");
    array.set(4, "b");
    assertEquals(capacity, array.length);

    // overwriting a slot returns the previous value
    String previous = array.set(4, "c");
    assertEquals("b", previous);
    assertEquals(capacity, array.length);

    // the iterator walks every slot in order, including the null ones
    Iterator<String> it = array.iterator();
    assertNull(it.next()); // index 0
    assertEquals("a", it.next()); // index 1
    assertNull(it.next()); // index 2
    assertNull(it.next()); // index 3
    assertEquals("c", it.next()); // index 4
    assertFalse(it.hasNext()); // index 5
  }

  @Test
  public void testSetIndexHigh() {
    PreAllocatedArray<String> array = new PreAllocatedArray<>(3);
    array.set(2, "in bounds");
    assertThrows(IndexOutOfBoundsException.class, () -> array.set(3, "out of bounds"));
  }

  @Test
  public void testSetIndexNegative() {
    PreAllocatedArray<String> array = new PreAllocatedArray<>(3);
    array.set(0, "in bounds");
    assertThrows(IndexOutOfBoundsException.class, () -> array.set(-3, "out of bounds"));
  }

  @Test
  public void testGetIndexHigh() {
    PreAllocatedArray<String> array = new PreAllocatedArray<>(3);
    assertNull(array.get(2));
    // spotbugs error suppressed at class level for lambda
    assertThrows(IndexOutOfBoundsException.class, () -> array.get(3));
  }

  @Test
  public void testGetIndexNegative() {
    PreAllocatedArray<String> array = new PreAllocatedArray<>(3);
    assertNull(array.get(0));
    // spotbugs error suppressed at class level for lambda
    assertThrows(IndexOutOfBoundsException.class, () -> array.get(-3));
  }
}
| 9,314 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/FastFormatTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Arrays;
import org.apache.accumulo.core.fate.FateTxId;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link FastFormat}: zero-padded radix formatting into caller-supplied buffers and
 * fixed-width hex string rendering.
 */
public class FastFormatTest {

  @Test
  public void testArrayOffset() {
    byte[] str = new byte[8];
    // format at offset 4; the '-' fill shows which buffer cells were left untouched
    Arrays.fill(str, (byte) '-');
    int len = FastFormat.toZeroPaddedString(str, 4, 64L, 1, 16, new byte[] {});
    assertEquals(2, len);
    assertEquals("----40--", new String(str, UTF_8));
    Arrays.fill(str, (byte) '-');
    len = FastFormat.toZeroPaddedString(str, 4, 64L, 2, 16, new byte[] {});
    assertEquals(2, len);
    assertEquals("----40--", new String(str, UTF_8));
    // a minimum width of 3 forces one zero of padding ("040")
    Arrays.fill(str, (byte) '-');
    len = FastFormat.toZeroPaddedString(str, 4, 64L, 3, 16, new byte[] {});
    assertEquals(3, len);
    assertEquals("----040-", new String(str, UTF_8));
    // prefix bytes are written before the digits and count toward the returned length
    Arrays.fill(str, (byte) '-');
    len = FastFormat.toZeroPaddedString(str, 4, 64L, 1, 16, new byte[] {'P'});
    assertEquals(3, len);
    assertEquals("----P40-", new String(str, UTF_8));
    Arrays.fill(str, (byte) '-');
    len = FastFormat.toZeroPaddedString(str, 4, 64L, 2, 16, new byte[] {'P'});
    assertEquals(3, len);
    assertEquals("----P40-", new String(str, UTF_8));
    Arrays.fill(str, (byte) '-');
    len = FastFormat.toZeroPaddedString(str, 4, 64L, 3, 16, new byte[] {'P'});
    assertEquals(4, len);
    assertEquals("----P040", new String(str, UTF_8));
    Arrays.fill(str, (byte) '-');
    len = FastFormat.toZeroPaddedString(str, 2, 64L, 4, 16, new byte[] {'P'});
    assertEquals(5, len);
    assertEquals("--P0040-", new String(str, UTF_8));
  }

  @Test
  public void testFormat() {
    // 1296 is "100" in base 36; widths at or below the natural length add no padding
    assertEquals("100",
        new String(FastFormat.toZeroPaddedString(1296, 1, 36, new byte[] {}), UTF_8));
    assertEquals("100",
        new String(FastFormat.toZeroPaddedString(1296, 2, 36, new byte[] {}), UTF_8));
    assertEquals("100",
        new String(FastFormat.toZeroPaddedString(1296, 3, 36, new byte[] {}), UTF_8));
    assertEquals("0100",
        new String(FastFormat.toZeroPaddedString(1296, 4, 36, new byte[] {}), UTF_8));
    assertEquals("00100",
        new String(FastFormat.toZeroPaddedString(1296, 5, 36, new byte[] {}), UTF_8));
    // the prefix is prepended verbatim and does not consume any of the minimum width
    assertEquals("PA100",
        new String(FastFormat.toZeroPaddedString(1296, 1, 36, new byte[] {'P', 'A'}), UTF_8));
    assertEquals("PA100",
        new String(FastFormat.toZeroPaddedString(1296, 2, 36, new byte[] {'P', 'A'}), UTF_8));
    assertEquals("PA100",
        new String(FastFormat.toZeroPaddedString(1296, 3, 36, new byte[] {'P', 'A'}), UTF_8));
    assertEquals("PA0100",
        new String(FastFormat.toZeroPaddedString(1296, 4, 36, new byte[] {'P', 'A'}), UTF_8));
    assertEquals("PA00100",
        new String(FastFormat.toZeroPaddedString(1296, 5, 36, new byte[] {'P', 'A'}), UTF_8));
    assertEquals("PA000100",
        new String(FastFormat.toZeroPaddedString(1296, 6, 36, new byte[] {'P', 'A'}), UTF_8));
    assertEquals("PA0000100",
        new String(FastFormat.toZeroPaddedString(1296, 7, 36, new byte[] {'P', 'A'}), UTF_8));
  }

  @Test
  public void testNegative1() {
    // negative values are rejected
    assertThrows(IllegalArgumentException.class,
        () -> FastFormat.toZeroPaddedString(-5, 1, 36, new byte[] {}));
  }

  @Test
  public void testNegative2() {
    // negative values are rejected by the buffer-writing overload as well
    byte[] str = new byte[8];
    assertThrows(IllegalArgumentException.class,
        () -> FastFormat.toZeroPaddedString(str, 0, -5, 1, 36, new byte[] {}));
  }

  @Test
  public void testArrayOutOfBounds() {
    // offset 4 + prefix + 4-wide number exceeds the 8-byte buffer
    byte[] str = new byte[8];
    assertThrows(ArrayIndexOutOfBoundsException.class,
        () -> FastFormat.toZeroPaddedString(str, 4, 64L, 4, 16, new byte[] {'P'}));
  }

  @Test
  public void testHexString() {
    final String PREFIX = "FATE[";
    final String SUFFIX = "]";
    // toHexString with prefix/suffix must agree with FateTxId's own formatting
    String formattedTxId = FateTxId.formatTid(64L);
    String hexStr = FastFormat.toHexString(PREFIX, 64L, SUFFIX);
    assertEquals(formattedTxId, hexStr);
    long txid = FateTxId.fromString("FATE[2e429160071c63d8]");
    assertEquals("FATE[2e429160071c63d8]", FastFormat.toHexString(PREFIX, txid, SUFFIX));
    // and the bare form must match String.format's 16-digit zero-padded lowercase hex
    assertEquals(String.format("%016x", 64L), FastFormat.toHexString(64L));
    assertEquals(String.format("%016x", 0x2e429160071c63d8L),
        FastFormat.toHexString(0x2e429160071c63d8L));
    assertEquals("-0000000000000040-", FastFormat.toHexString("-", 64L, "-"));
    assertEquals("-00000000075bcd15", FastFormat.toHexString("-", 123456789L, ""));
    assertEquals("000000000000000a", FastFormat.toHexString(0xaL));
    assertEquals("000000000000000a", FastFormat.toHexString(10L));
    assertEquals("0000000000000009", FastFormat.toHexString(9L));
    assertEquals("0000000000000000", FastFormat.toHexString(0L));
  }

  @Test
  public void testZeroPaddedHex() {
    // toZeroPaddedHex allocates and returns its own buffer, so no pre-filled
    // array is needed (the previous Arrays.fill calls here were dead code)
    byte[] str = FastFormat.toZeroPaddedHex(123456789L);
    assertEquals(16, str.length);
    assertEquals("00000000075bcd15", new String(str, UTF_8));
    str = FastFormat.toZeroPaddedHex(0x2e429160071c63d8L);
    assertEquals(16, str.length);
    assertEquals("2e429160071c63d8", new String(str, UTF_8));
  }
}
| 9,315 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/compaction/CompactionPrioritizerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.compaction;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.spi.compaction.CompactionJob;
import org.apache.accumulo.core.spi.compaction.CompactionKind;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link CompactionJobPrioritizer}: priority values per compaction kind and the ordering
 * imposed by {@code JOB_COMPARATOR}.
 */
public class CompactionPrioritizerTest {

  /**
   * Creates a compaction job over {@code numFiles} synthetic RFiles in the given tablet, with a
   * priority derived from the kind, the tablet's total file count, and the job's file count.
   */
  public CompactionJob createJob(CompactionKind kind, String tablet, int numFiles, int totalFiles) {
    Collection<CompactableFile> files = new ArrayList<>();
    for (int i = 0; i < numFiles; i++) {
      files.add(CompactableFile
          .create(URI.create("hdfs://foonn/accumulo/tables/5/" + tablet + "/" + i + ".rf"), 4, 4));
    }
    // TODO pass numFiles
    return new CompactionJobImpl(
        CompactionJobPrioritizer.createPriority(kind, totalFiles, numFiles),
        CompactionExecutorIdImpl.externalId("test"), files, kind, Optional.of(false));
  }

  // note: no "throws Exception" — nothing in this test throws a checked exception
  @Test
  public void testPrioritizer() {
    // USER compactions occupy the non-negative short range, clamped at Short.MAX_VALUE
    assertEquals((short) 0, CompactionJobPrioritizer.createPriority(CompactionKind.USER, 0, 0));
    assertEquals((short) 10000,
        CompactionJobPrioritizer.createPriority(CompactionKind.USER, 10000, 0));
    assertEquals((short) 32767,
        CompactionJobPrioritizer.createPriority(CompactionKind.USER, 32767, 0));
    assertEquals((short) 32767,
        CompactionJobPrioritizer.createPriority(CompactionKind.USER, Integer.MAX_VALUE, 0));
    // SYSTEM compactions occupy the negative short range, clamped at -1
    assertEquals((short) -32768,
        CompactionJobPrioritizer.createPriority(CompactionKind.SYSTEM, 0, 0));
    assertEquals((short) -22768,
        CompactionJobPrioritizer.createPriority(CompactionKind.SYSTEM, 10000, 0));
    assertEquals((short) -1,
        CompactionJobPrioritizer.createPriority(CompactionKind.SYSTEM, 32767, 0));
    assertEquals((short) -1,
        CompactionJobPrioritizer.createPriority(CompactionKind.SYSTEM, Integer.MAX_VALUE, 0));
  }

  @Test
  public void testCompactionJobComparator() {
    var j1 = createJob(CompactionKind.USER, "t-009", 10, 20);
    var j2 = createJob(CompactionKind.USER, "t-010", 11, 25);
    var j3 = createJob(CompactionKind.USER, "t-011", 11, 20);
    var j4 = createJob(CompactionKind.SYSTEM, "t-012", 11, 30);
    var j5 = createJob(CompactionKind.SYSTEM, "t-013", 5, 10);
    var j8 = createJob(CompactionKind.SELECTOR, "t-014", 5, 21);
    var j9 = createJob(CompactionKind.SELECTOR, "t-015", 7, 20);
    // higher-priority jobs sort first regardless of the shuffled input order
    var expected = List.of(j2, j3, j1, j4, j9, j8, j5);
    var shuffled = new ArrayList<>(expected);
    Collections.shuffle(shuffled);
    Collections.sort(shuffled, CompactionJobPrioritizer.JOB_COMPARATOR);
    assertEquals(expected, shuffled);
  }
}
| 9,316 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/json/ByteArrayToBase64TypeAdapterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.json;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
import com.google.gson.Gson;
/**
 * Tests that byte arrays survive a JSON round trip through a Gson instance configured with
 * {@link ByteArrayToBase64TypeAdapter}.
 */
public class ByteArrayToBase64TypeAdapterTest {

  private static final Gson gson = ByteArrayToBase64TypeAdapter.createBase64Gson();

  @Test
  public void testSerializeText() throws IOException {
    final Text original = new Text("This is a test");
    try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos)) {
      // write the Text's serialized form directly to dos; the original code wrapped
      // dos in a second, redundant DataOutputStream
      original.write(dos);
      // round-trip the raw bytes through JSON and deserialize back into a Text
      final String encoded = gson.toJson(baos.toByteArray());
      final Text decoded = new Text();
      decoded.readFields(
          new DataInputStream(new ByteArrayInputStream(gson.fromJson(encoded, byte[].class))));
      assertEquals(original.toString(), decoded.toString());
    }
  }

  @Test
  public void testByteArray() {
    // arbitrary bytes must come back unchanged from a JSON round trip
    final byte[] original = new byte[] {0x01, 0x06, 0x34, 0x09, 0x12, 0x34, 0x57, 0x56, 0x30, 0x74};
    final String encoded = gson.toJson(original);
    final byte[] decoded = gson.fromJson(encoded, byte[].class);
    assertArrayEquals(original, decoded);
  }
}
| 9,317 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/threads/AccumuloUncaughtExceptionHandlerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.threads;
import static org.apache.accumulo.core.util.threads.AccumuloUncaughtExceptionHandler.isError;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.io.UncheckedIOException;
import org.junit.jupiter.api.Test;
public class AccumuloUncaughtExceptionHandlerTest {

  @Test
  public void testIsError_noerror() {
    // plain exceptions, with or without non-error suppressed exceptions, are not errors
    assertFalse(isError(new IOException()));
    assertFalse(isError(new UncheckedIOException(new IOException())));
    Exception withSuppressed = new UncheckedIOException(new IOException());
    withSuppressed.addSuppressed(new RuntimeException());
    withSuppressed.addSuppressed(new RuntimeException());
    assertFalse(isError(withSuppressed));
  }

  @Test
  public void testIsError_error() {
    // a direct Error, or an Error anywhere along the cause chain, is detected
    assertTrue(isError(new UnsatisfiedLinkError()));
    assertTrue(isError(new RuntimeException(new OutOfMemoryError())));
    assertTrue(isError(new RuntimeException(new RuntimeException(new UnsatisfiedLinkError()))));
    // an Error is still detected when it wraps a non-error cause
    assertTrue(isError(new Error(new RuntimeException())));
    assertTrue(isError(new RuntimeException(new Error(new RuntimeException()))));
    assertTrue(
        isError(new RuntimeException(new RuntimeException(new Error(new RuntimeException())))));
    // a suppressed exception whose cause chain contains a terminal Error
    Exception suppressedTerminal = new UncheckedIOException(new IOException());
    suppressedTerminal.addSuppressed(new RuntimeException());
    suppressedTerminal.addSuppressed(new RuntimeException());
    suppressedTerminal.addSuppressed(new RuntimeException(new UnsatisfiedLinkError()));
    assertTrue(isError(suppressedTerminal));
    assertTrue(isError(new RuntimeException(suppressedTerminal)));
    // a suppressed exception whose cause chain contains a non-terminal Error
    Exception suppressedNonTerminal = new UncheckedIOException(new IOException());
    suppressedNonTerminal.addSuppressed(new RuntimeException());
    suppressedNonTerminal.addSuppressed(new RuntimeException());
    suppressedNonTerminal.addSuppressed(new RuntimeException(new Error(new RuntimeException())));
    assertTrue(isError(suppressedNonTerminal));
    assertTrue(isError(new RuntimeException(suppressedNonTerminal)));
    // an Error buried a few levels deep in a chain of suppressed exceptions
    Exception outer = new UncheckedIOException(new IOException());
    Exception middle = new UncheckedIOException(new IOException());
    Exception inner = new RuntimeException(new OutOfMemoryError());
    outer.addSuppressed(middle);
    middle.addSuppressed(inner);
    assertTrue(isError(outer));
    assertTrue(isError(new RuntimeException(outer)));
    // the suppressed throwable itself being an Error (not nested as a cause)
    Exception directSuppressed = new UncheckedIOException(new IOException());
    directSuppressed.addSuppressed(new RuntimeException());
    directSuppressed.addSuppressed(new RuntimeException());
    directSuppressed.addSuppressed(new Error(new RuntimeException()));
    assertTrue(isError(directSuppressed));
    assertTrue(isError(new RuntimeException(directSuppressed)));
  }

  @Test
  public void testIsError_loop() {
    // a cycle in the suppressed-exception graph must not cause infinite traversal
    Exception first = new UncheckedIOException(new IOException());
    Exception second = new RuntimeException(new RuntimeException());
    Exception third = new IllegalStateException();
    first.addSuppressed(second);
    second.addSuppressed(third);
    third.addSuppressed(first);
    assertTrue(isError(first));
    assertTrue(isError(new RuntimeException(first)));
  }
}
| 9,318 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/format/FormatterConfigTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.text.DateFormat;
import org.junit.jupiter.api.Test;
public class FormatterConfigTest {

  @Test
  public void testConstructor() {
    // a fresh config neither limits value length nor prints timestamps
    FormatterConfig cfg = new FormatterConfig();
    assertFalse(cfg.willLimitShowLength());
    assertFalse(cfg.willPrintTimestamps());
  }

  @Test
  public void testSetShownLength() {
    FormatterConfig cfg = new FormatterConfig();
    // negative lengths are rejected outright
    assertThrows(IllegalArgumentException.class, () -> cfg.setShownLength(-1),
        "Should throw on negative length.");
    // zero and positive lengths are accepted and turn limiting on
    cfg.setShownLength(0);
    assertEquals(0, cfg.getShownLength());
    assertTrue(cfg.willLimitShowLength());
    cfg.setShownLength(1);
    assertEquals(1, cfg.getShownLength());
    assertTrue(cfg.willLimitShowLength());
  }

  @Test
  public void testDoNotLimitShowLength() {
    // doNotLimitShowLength() undoes a previously set limit
    FormatterConfig cfg = new FormatterConfig();
    assertFalse(cfg.willLimitShowLength());
    cfg.setShownLength(1);
    assertTrue(cfg.willLimitShowLength());
    cfg.doNotLimitShowLength();
    assertFalse(cfg.willLimitShowLength());
  }

  @Test
  public void testGetDateFormat() {
    FormatterConfig first = new FormatterConfig();
    DateFormat firstFormat = first.getDateFormatSupplier().get();
    FormatterConfig second = new FormatterConfig();
    // independent configs supply independent DateFormat instances
    assertNotSame(firstFormat, second.getDateFormatSupplier().get());
    // sharing the supplier shares the DateFormat
    second.setDateFormatSupplier(first.getDateFormatSupplier());
    assertSame(firstFormat, second.getDateFormatSupplier().get());
    // even though copying, it can't copy the Generator, so will pull out the same DateFormat
    FormatterConfig copy = new FormatterConfig(first);
    assertSame(firstFormat, copy.getDateFormatSupplier().get());
  }
}
| 9,319 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/format/ShardedTableDistributionFormatterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ShardedTableDistributionFormatterTest {
  ShardedTableDistributionFormatter formatter;
  Map<Key,Value> entries;

  @BeforeEach
  public void setUp() {
    entries = new TreeMap<>();
    formatter = new ShardedTableDistributionFormatter();
  }

  @Test
  public void testInitialize() {
    // two input entries collapse into a single formatted summary
    entries.put(new Key(), new Value());
    entries.put(new Key("r", "~tab"), new Value());
    formatter.initialize(entries.entrySet(), new FormatterConfig());
    assertTrue(formatter.hasNext());
    formatter.next();
    assertFalse(formatter.hasNext());
  }

  @Test
  public void testAggregate() {
    // one tablet with no end-row date, two tablets sharing the 19700101 date
    entries.put(new Key("t", "~tab", "loc"), new Value("srv1"));
    entries.put(new Key("t;19700101", "~tab", "loc", 0), new Value("srv1"));
    entries.put(new Key("t;19700101", "~tab", "loc", 1), new Value("srv2"));
    formatter.initialize(entries.entrySet(), new FormatterConfig());
    // skip the two header lines; rows 2-3 hold the per-day tablet counts
    List<String> summaryRows = Arrays.asList(formatter.next().split("\n")).subList(2, 4);
    assertTrue(summaryRows.stream().anyMatch(row -> row.startsWith("NULL") && row.endsWith("1")));
    assertTrue(
        summaryRows.stream().anyMatch(row -> row.startsWith("19700101") && row.endsWith("2")));
    assertFalse(formatter.hasNext());
  }
}
| 9,320 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/format/FormatterFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.Collections;
import java.util.Map.Entry;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class FormatterFactoryTest {
  Iterable<Entry<Key,Value>> scanner;

  @BeforeEach
  public void setUp() {
    scanner = Collections.<Key,Value>emptyMap().entrySet();
  }

  @Test
  public void testGetDefaultFormatter() {
    // requesting the Formatter interface should fall back to the default implementation
    FormatterConfig config = new FormatterConfig().setPrintTimestamps(true);
    Formatter viaDefault = FormatterFactory.getDefaultFormatter(scanner, config);
    Formatter viaInterface = FormatterFactory.getFormatter(Formatter.class, scanner, config);
    assertEquals(viaDefault.getClass(), viaInterface.getClass());
  }
}
| 9,321 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/format/DateFormatSupplierTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.text.DateFormat;
import java.util.Date;
import java.util.TimeZone;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link DateFormatSupplier}: suppliers cache a per-thread {@link DateFormat} but never
 * share instances across suppliers, and each supplier's TimeZone is independent.
 */
public class DateFormatSupplierTest {

  /**
   * Asserts two supplier instances create independent objects: each supplier is stable across
   * calls, but the two never hand out the same instance.
   */
  private void assertSuppliersIndependent(ThreadLocal<DateFormat> supplierA,
      ThreadLocal<DateFormat> supplierB) {
    DateFormat getA1 = supplierA.get();
    DateFormat getA2 = supplierA.get();
    assertSame(getA1, getA2);
    DateFormat getB1 = supplierB.get();
    DateFormat getB2 = supplierB.get();
    assertSame(getB1, getB2);
    assertNotSame(getA1, getB1);
  }

  @Test
  public void testCreateDefaultFormatSupplier() {
    ThreadLocal<DateFormat> supplierA = DateFormatSupplier.createDefaultFormatSupplier();
    ThreadLocal<DateFormat> supplierB = DateFormatSupplier.createDefaultFormatSupplier();
    assertSuppliersIndependent(supplierA, supplierB);
  }

  @Test
  public void testCreateSimpleFormatSupplier() {
    final String format = DateFormatSupplier.HUMAN_READABLE_FORMAT;
    DateFormatSupplier supplierA = DateFormatSupplier.createSimpleFormatSupplier(format);
    DateFormatSupplier supplierB = DateFormatSupplier.createSimpleFormatSupplier(format);
    assertSuppliersIndependent(supplierA, supplierB);
    // since dfA and dfB come from different suppliers, altering the TimeZone on one does not affect
    // the other
    supplierA.setTimeZone(TimeZone.getTimeZone("UTC"));
    final DateFormat dfA = supplierA.get();
    supplierB.setTimeZone(TimeZone.getTimeZone("EST"));
    final DateFormat dfB = supplierB.get();
    final String resultA = dfA.format(new Date(0));
    assertEquals("1970/01/01 00:00:00.000", resultA);
    final String resultB = dfB.format(new Date(0));
    assertEquals("1969/12/31 19:00:00.000", resultB);
    // assertNotEquals is the idiomatic form of the old assertTrue(!a.equals(b))
    assertNotEquals(resultA, resultB);
  }
}
| 9,322 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/format/DefaultFormatterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TimeZone;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class DefaultFormatterTest {
  public static final TimeZone UTC = TimeZone.getTimeZone("UTC");
  public static final TimeZone EST = TimeZone.getTimeZone("EST");
  DefaultFormatter df;
  Iterable<Entry<Key,Value>> empty = Collections.<Key,Value>emptyMap().entrySet();

  @BeforeEach
  public void setUp() {
    df = new DefaultFormatter();
  }

  @Test
  public void testDoubleInitialize() {
    // re-initializing an already-initialized formatter is rejected
    FormatterConfig withTimestamps = new FormatterConfig().setPrintTimestamps(true);
    df.initialize(empty, withTimestamps);
    assertThrows(IllegalStateException.class, () -> df.initialize(empty, withTimestamps));
  }

  @Test
  public void testNextBeforeInitialize() {
    // using the formatter before initialize() is rejected
    assertThrows(IllegalStateException.class, () -> df.hasNext());
  }

  @Test
  public void testAppendBytes() {
    StringBuilder buffer = new StringBuilder();
    byte[] bytes = {0, '\\', 'x', -0x01};
    // an empty value appends nothing
    DefaultFormatter.appendValue(buffer, new Value());
    assertEquals("", buffer.toString());
    // non-printable bytes are hex-escaped; backslashes are doubled
    DefaultFormatter.appendText(buffer, new Text(bytes));
    assertEquals("\\x00\\\\x\\xFF", buffer.toString());
  }

  /** Formats the single entry of {@code map} with a fresh formatter under {@code config}. */
  private String formatSingle(Map<Key,Value> map, FormatterConfig config) {
    df = new DefaultFormatter();
    df.initialize(map.entrySet(), config);
    return df.next();
  }

  @Test
  public void testFormatEntry() {
    final long ts = 0;
    Map<Key,Value> entries = new TreeMap<>();
    entries.put(new Key("a", "ab", "abc", ts), new Value("abcd"));

    // no timestamp, no length limit
    FormatterConfig config = new FormatterConfig();
    assertEquals("a ab:abc []\tabcd", formatSingle(entries, config));
    // with timestamp, no length limit
    config.setPrintTimestamps(true);
    assertEquals("a ab:abc [] " + ts + "\tabcd", formatSingle(entries, config));
    // with timestamp, each field truncated to one character
    config.setPrintTimestamps(true).setShownLength(1);
    assertEquals("a a:a [] " + ts + "\ta", formatSingle(entries, config));
    // with timestamp rendered through a year-only date format
    config.setPrintTimestamps(true).doNotLimitShowLength()
        .setDateFormatSupplier(DateFormatSupplier.createSimpleFormatSupplier("YYYY"));
    assertEquals("a ab:abc [] 1970\tabcd", formatSingle(entries, config));
    // with timestamp rendered as hour-of-day, in two different time zones
    config.setPrintTimestamps(true).doNotLimitShowLength()
        .setDateFormatSupplier(DateFormatSupplier.createSimpleFormatSupplier("HH", UTC));
    assertEquals("a ab:abc [] 00\tabcd", formatSingle(entries, config));
    config.setPrintTimestamps(true).doNotLimitShowLength()
        .setDateFormatSupplier(DateFormatSupplier.createSimpleFormatSupplier("HH", EST));
    assertEquals("a ab:abc [] 19\tabcd", formatSingle(entries, config));
  }
}
| 9,323 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/util/format/StatisticsDisplayFormatterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.util.format;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Map;
import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class StatisticsDisplayFormatterTest {

  // formatter under test, rebuilt fresh for every case
  StatisticsDisplayFormatter formatter;
  // entries fed to the formatter, kept sorted by key
  Map<Key,Value> entries;

  @BeforeEach
  public void setUp() {
    entries = new TreeMap<>();
    formatter = new StatisticsDisplayFormatter();
  }

  /** After initialize() with one entry, the formatter reports output available. */
  @Test
  public void testInitialize() {
    entries.put(new Key(), new Value());
    formatter.initialize(entries.entrySet(), new FormatterConfig());
    assertTrue(formatter.hasNext());
  }

  /** Two entries differing only in timestamp are aggregated into a single summary report. */
  @Test
  public void testAggregate() {
    entries.put(new Key("", "", "", 1), new Value());
    entries.put(new Key("", "", "", 2), new Value());
    formatter.initialize(entries.entrySet(), new FormatterConfig());
    String[] lines = formatter.next().split("\n");
    // lines 2/5/8 each end with a count of 1 — presumably per-section distinct counts;
    // verify against StatisticsDisplayFormatter's output layout if this ever shifts
    assertTrue(lines[2].endsWith(": 1"));
    assertTrue(lines[5].endsWith(": 1"));
    assertTrue(lines[8].endsWith(": 1"));
    assertEquals("2 entries matched.", lines[9]);
    // the summary consumes all input, so no further output remains
    assertFalse(formatter.hasNext());
  }
}
| 9,324 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import org.apache.accumulo.core.WithTestNames;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor;
import org.apache.accumulo.core.file.rfile.RFile;
import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
public class BloomFilterLayerLookupTest extends WithTestNames {
private static final Logger log = LoggerFactory.getLogger(BloomFilterLayerLookupTest.class);
// JUnit-managed scratch directory for the generated RFile
@TempDir
private static File tempDir;
/**
 * Writes ~100k random rows into a bloom-filter-enabled RFile, then compares seek rates for
 * (a) random rows that mostly do not exist and (b) rows known to exist. The bloom filter
 * lets absent-row seeks return early, so rate (a) is expected to beat rate (b).
 */
@Test
public void test() throws IOException {
// use a set so duplicate random row numbers collapse
HashSet<Integer> valsSet = new HashSet<>();
for (int i = 0; i < 100000; i++) {
valsSet.add(RANDOM.get().nextInt(Integer.MAX_VALUE));
}
ArrayList<Integer> vals = new ArrayList<>(valsSet);
Collections.sort(vals);
// enable bloom filters keyed on column family; load threshold 1 forces eager loading
ConfigurationCopy acuconf = new ConfigurationCopy(DefaultConfiguration.getInstance());
acuconf.set(Property.TABLE_BLOOM_ENABLED, "true");
acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR, ColumnFamilyFunctor.class.getName());
acuconf.set(Property.TABLE_FILE_TYPE, RFile.EXTENSION);
acuconf.set(Property.TABLE_BLOOM_LOAD_THRESHOLD, "1");
acuconf.set(Property.TSERV_BLOOM_LOAD_MAXCONCURRENT, "1");
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
// get output file name
String suffix = FileOperations.getNewFileExtension(acuconf);
String fname = new File(tempDir, testName() + "." + suffix).getAbsolutePath();
FileSKVWriter bmfw = FileOperations.getInstance().newWriterBuilder()
.forFile(UnreferencedTabletFile.of(fs, new Path(fname)), fs, conf,
NoCryptoServiceFactory.NONE)
.withTableConfiguration(acuconf).build();
// write data to file
long t1 = System.currentTimeMillis();
bmfw.startDefaultLocalityGroup();
for (Integer i : vals) {
String fi = String.format("%010d", i);
// two column families per row; the configured functor keys the bloom filter on family
bmfw.append(new Key(new Text("r" + fi), new Text("cf1")), new Value("v" + fi));
bmfw.append(new Key(new Text("r" + fi), new Text("cf2")), new Value("v" + fi));
}
long t2 = System.currentTimeMillis();
log.debug(String.format("write rate %6.2f%n", vals.size() / ((t2 - t1) / 1000.0)));
bmfw.close();
t1 = System.currentTimeMillis();
FileSKVIterator bmfr = FileOperations.getInstance().newReaderBuilder()
.forFile(UnreferencedTabletFile.of(fs, new Path(fname)), fs, conf,
NoCryptoServiceFactory.NONE)
.withTableConfiguration(acuconf).build();
t2 = System.currentTimeMillis();
log.debug("Opened {} in {}", fname, (t2 - t1));
// seek 5000 random rows; most will be absent, letting the bloom filter skip work
int hits = 0;
t1 = System.currentTimeMillis();
for (int i = 0; i < 5000; i++) {
int row = RANDOM.get().nextInt(Integer.MAX_VALUE);
seek(bmfr, row);
if (valsSet.contains(row)) {
hits++;
assertTrue(bmfr.hasTop());
}
}
t2 = System.currentTimeMillis();
double rate1 = 5000 / ((t2 - t1) / 1000.0);
log.debug(String.format("random lookup rate : %6.2f%n", rate1));
log.debug("hits = {}", hits);
// seek 500 rows that are known to exist; every one must be found
int count = 0;
t1 = System.currentTimeMillis();
for (Integer row : valsSet) {
seek(bmfr, row);
assertTrue(bmfr.hasTop());
count++;
if (count >= 500) {
break;
}
}
t2 = System.currentTimeMillis();
double rate2 = 500 / ((t2 - t1) / 1000.0);
log.debug(String.format("existing lookup rate %6.2f%n", rate2));
log.debug("expected hits 500. Receive hits: {}", count);
bmfr.close();
// NOTE(review): compares wall-clock rates, so this assertion can be flaky on a loaded host
assertTrue(rate1 > rate2);
}
// seeks to the single-row/family range [r<row> cf1, followingKey) with no family filter
private void seek(FileSKVIterator bmfr, int row) throws IOException {
String fi = String.format("%010d", row);
// bmfr.seek(new Range(new Text("r"+fi)));
Key k1 = new Key(new Text("r" + fi), new Text("cf1"));
bmfr.seek(new Range(k1, true, k1.followingKey(PartialKey.ROW_COLFAM), false), new ArrayList<>(),
false);
}
}
| 9,325 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/FilePrefixTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.junit.jupiter.api.Test;
public class FilePrefixTest {

  /** Each recognized prefix letter resolves to its constant; unknown letters are rejected. */
  @Test
  public void testPrefixes() {
    assertEquals(FilePrefix.MAJOR_COMPACTION_ALL_FILES, FilePrefix.fromPrefix("A"));
    assertEquals(FilePrefix.MAJOR_COMPACTION, FilePrefix.fromPrefix("C"));
    assertEquals(FilePrefix.MINOR_COMPACTION, FilePrefix.fromPrefix("F"));
    assertEquals(FilePrefix.BULK_IMPORT, FilePrefix.fromPrefix("I"));
    // a letter outside the known set must be refused
    assertThrows(IllegalArgumentException.class, () -> FilePrefix.fromPrefix("B"));
  }
}
| 9,326 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/FileOperationsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.File;
import java.io.IOException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.file.rfile.RFile;
import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.junit.jupiter.api.Test;
public class FileOperationsTest {

  /**
   * Verifies the writer builder accepts filenames containing more than one dot (e.g.
   * {@code test.file.rf}); extension handling must not be confused by the extra dot.
   */
  @Test
  public void handlesFilenamesWithMoreThanOneDot() throws IOException {
    // Capture the exception itself (not just a flag) so a failure is diagnosable.
    Exception caught = null;
    String filename = "target/test.file." + RFile.EXTENSION;
    File testFile = new File(filename);
    if (testFile.exists()) {
      FileUtils.forceDelete(testFile);
    }
    FileSKVWriter writer = null;
    try {
      FileOperations fileOperations = FileOperations.getInstance();
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.getLocal(conf);
      AccumuloConfiguration acuconf = DefaultConfiguration.getInstance();
      writer = fileOperations.newWriterBuilder()
          .forFile(UnreferencedTabletFile.of(fs, testFile), fs, conf, NoCryptoServiceFactory.NONE)
          .withTableConfiguration(acuconf).build();
    } catch (Exception ex) {
      caught = ex;
    } finally {
      // Close exactly once, in the finally block (previously the writer was closed in both the
      // try body and the finally block).
      if (writer != null) {
        writer.close();
      }
      // Delete quietly: forceDelete throws when the file was never created, which would mask
      // the original failure.
      FileUtils.deleteQuietly(testFile);
    }
    assertFalse(caught != null, "Should not throw with more than 1 dot in filename: " + caught);
  }
}
| 9,327 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/streams/RateLimitedOutputStreamTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.streams;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.easymock.EasyMock;
import org.junit.jupiter.api.Test;
import com.google.common.io.CountingOutputStream;
public class RateLimitedOutputStreamTest {

  /**
   * Writes random-sized buffers through a {@link RateLimitedOutputStream} and verifies the
   * wrapped {@link RateLimiter} is charged exactly one permit per byte written, and that the
   * stream's reported position matches the byte count.
   */
  @Test
  public void permitsAreProperlyAcquired() throws Exception {
    // Tracks the total permits handed out by the mock limiter.
    AtomicLong rateLimiterPermitsAcquired = new AtomicLong();
    // Construct mock object and stub acquire() to accumulate its argument.
    RateLimiter rateLimiter = EasyMock.niceMock(RateLimiter.class);
    rateLimiter.acquire(EasyMock.anyLong());
    EasyMock.expectLastCall()
        .andAnswer(() -> rateLimiterPermitsAcquired.addAndGet(EasyMock.getCurrentArgument(0)))
        .anyTimes();
    EasyMock.replay(rateLimiter);
    long bytesWritten = 0;
    try (RateLimitedOutputStream os =
        new RateLimitedOutputStream(new NullOutputStream(), rateLimiter)) {
      for (int i = 0; i < 100; ++i) {
        // nextInt(bound) yields a uniform size in [0, 65536) directly, replacing the
        // Math.abs(nextInt() % 65536) idiom.
        byte[] bytes = new byte[RANDOM.get().nextInt(65536)];
        os.write(bytes);
        bytesWritten += bytes.length;
      }
      assertEquals(bytesWritten, os.position());
    }
    assertEquals(bytesWritten, rateLimiterPermitsAcquired.get());
  }

  /** Discards all output while still counting bytes, so position() stays meaningful. */
  public static class NullOutputStream extends FSDataOutputStream {
    public NullOutputStream() {
      super(new CountingOutputStream(OutputStream.nullOutputStream()), null);
    }
  }
}
| 9,328 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/streams/RateLimitedInputStreamTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.streams;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.accumulo.core.util.ratelimit.RateLimiter;
import org.apache.hadoop.fs.Seekable;
import org.easymock.EasyMock;
import org.junit.jupiter.api.Test;
public class RateLimitedInputStreamTest {

  /**
   * Reads random-sized chunks through a {@link RateLimitedInputStream} and verifies the wrapped
   * {@link RateLimiter} is charged exactly one permit per byte read.
   */
  @Test
  public void permitsAreProperlyAcquired() throws Exception {
    // Tracks the total permits handed out by the mock limiter.
    AtomicLong rateLimiterPermitsAcquired = new AtomicLong();
    // Construct mock object and stub acquire() to accumulate its argument.
    RateLimiter rateLimiter = EasyMock.niceMock(RateLimiter.class);
    rateLimiter.acquire(EasyMock.anyLong());
    EasyMock.expectLastCall()
        .andAnswer(() -> rateLimiterPermitsAcquired.addAndGet(EasyMock.getCurrentArgument(0)))
        .anyTimes();
    EasyMock.replay(rateLimiter);
    long bytesRetrieved = 0;
    try (InputStream is = new RateLimitedInputStream(new RandomInputStream(), rateLimiter)) {
      for (int i = 0; i < 100; ++i) {
        // nextInt(bound) yields a uniform count in [0, 65536) directly; the previous
        // Math.abs(nextInt()) % 65536 relied on Math.abs, which returns a negative value
        // for Integer.MIN_VALUE.
        int count = RANDOM.get().nextInt(65536);
        int countRead = is.read(new byte[count]);
        assertEquals(count, countRead);
        bytesRetrieved += count;
      }
    }
    assertEquals(bytesRetrieved, rateLimiterPermitsAcquired.get());
  }

  /** Endless stream of random bytes; Seekable is required by the wrapper but unsupported. */
  private static class RandomInputStream extends InputStream implements Seekable {
    @Override
    public int read() {
      return RANDOM.get().nextInt() & 0xff;
    }

    @Override
    public void seek(long pos) {
      throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public long getPos() {
      throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public boolean seekToNewSource(long targetPos) {
      throw new UnsupportedOperationException("Not supported yet.");
    }
  }
}
| 9,329 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/RollingStatsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.function.IntSupplier;
import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.distribution.ZipfDistribution;
import org.apache.commons.math3.random.Well19937c;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.junit.jupiter.api.Test;
import com.google.common.math.DoubleMath;
/**
 * Verifies RollingStats against Commons Math DescriptiveStatistics over the same sliding
 * window, across several input distributions. Agreement is fuzzy (TOLERANCE) because
 * RollingStats updates incrementally rather than recomputing over the window.
 */
public class RollingStatsTest {
// tolerance for fuzzy comparison of the two implementations
private static final double TOLERANCE = 1.0 / 1000;
// asserts the two doubles agree within TOLERANCE, with a diagnostic message
private static void assertFuzzyEquals(double expected, double actual) {
assertTrue(DoubleMath.fuzzyEquals(expected, actual, TOLERANCE), String.format(
"expected: %f, actual: %f diff: %f", expected, actual, Math.abs(expected - actual)));
}
// compares mean/variance/stddev of the reference stats against the rolling implementation
private static void checkAgreement(DescriptiveStatistics ds, RollingStats rs) {
// getting stats from ds is expensive, so do it once... otherwise unit test takes 11 sec
// instead of 5 secs
double expMean = ds.getMean();
double expVar = ds.getVariance();
double expStdDev = Math.sqrt(expVar);
assertFuzzyEquals(expMean, rs.getMean());
assertFuzzyEquals(expVar, rs.getVariance());
assertFuzzyEquals(expStdDev, rs.getStandardDeviation());
// every input value in these tests is non-negative, so all stats must be as well
assertTrue(expMean >= 0);
assertTrue(rs.getMean() >= 0);
assertTrue(expVar >= 0);
assertTrue(rs.getVariance() >= 0);
assertTrue(expStdDev >= 0);
assertTrue(rs.getStandardDeviation() >= 0);
}
/**
 * Feeds identical values to a reference DescriptiveStatistics and two RollingStats. rs is
 * checked after every value; rsp is only spot-checked (~0.1% of adds, plus check()),
 * exercising the case where stats are read infrequently.
 */
private static class StatTester {
private DescriptiveStatistics ds;
private RollingStats rs;
private RollingStats rsp;
StatTester(int windowSize) {
ds = new DescriptiveStatistics();
ds.setWindowSize(windowSize);
rs = new RollingStats(windowSize);
rsp = new RollingStats(windowSize);
}
void addValue(long v) {
ds.addValue(v);
rs.addValue(v);
rsp.addValue(v);
checkAgreement(ds, rs);
if (RANDOM.get().nextDouble() < 0.001) {
checkAgreement(ds, rsp);
}
}
void check() {
checkAgreement(ds, rsp);
}
}
// a small fixed set of sizes drawn uniformly at random
@Test
public void testFewSizes() {
StatTester st = new StatTester(1019);
int[] keySizes = {103, 113, 123, 2345};
for (int i = 0; i < 10000; i++) {
st.addValue(keySizes[RANDOM.get().nextInt(keySizes.length)]);
}
st.check();
}
// constant input: variance must remain (fuzzily) zero
@Test
public void testConstant() {
StatTester st = new StatTester(1019);
for (int i = 0; i < 10000; i++) {
st.addValue(111);
}
st.check();
}
// uniform values in [200, 250) across several window sizes
@Test
public void testUniformIncreasing() {
for (int windowSize : new int[] {10, 13, 20, 100, 500}) {
StatTester st = new StatTester(windowSize);
for (int i = 0; i < 1000; i++) {
int v = 200 + RANDOM.get().nextInt(50);
st.addValue(v);
}
st.check();
}
}
// long runs of identical values that grow by 10% between runs
@Test
public void testSlowIncreases() {
// number of keys with the same len
int len = 100;
StatTester st = new StatTester(1019);
for (int i = 0; i < 50; i++) {
for (int j = 0; j < 3000; j++) {
st.addValue(len);
}
len = (int) (len * 1.1);
}
st.check();
}
// runs 7000 samples from the supplied distribution through a StatTester
private void testDistribrution(IntSupplier d) {
StatTester st = new StatTester(2017);
for (int i = 0; i < 7000; i++) {
st.addValue(d.getAsInt());
}
st.check();
}
@Test
public void testZipf() {
ZipfDistribution zd = new ZipfDistribution(new Well19937c(42), 1000, 2);
testDistribrution(() -> zd.sample() * 100);
}
@Test
public void testNormal() {
NormalDistribution nd = new NormalDistribution(new Well19937c(42), 200, 20);
testDistribrution(() -> (int) nd.sample());
}
// mixture of many small keys and occasional very large keys (variance spikes)
@Test
public void testSpikes() {
StatTester st = new StatTester(3017);
for (int i = 0; i < 13; i++) {
// write small keys
int numSmall = 1000 + RANDOM.get().nextInt(1000);
for (int s = 0; s < numSmall; s++) {
int sks = 50 + RANDOM.get().nextInt(100);
// simulate row with multiple cols
for (int c = 0; c < 3; c++) {
st.addValue(sks);
}
}
// write a few large keys
int numLarge = 1 + RANDOM.get().nextInt(1);
for (int l = 0; l < numLarge; l++) {
int lks = 500000 + RANDOM.get().nextInt(1000000);
for (int c = 0; c < 3; c++) {
st.addValue(lks);
}
}
}
st.check();
}
}
| 9,330 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/GenerateSplitsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.apache.accumulo.core.file.rfile.GenerateSplits.getEvenlySpacedSplits;
import static org.apache.accumulo.core.file.rfile.GenerateSplits.main;
import static org.apache.accumulo.core.file.rfile.RFileTest.newColFamByteSequence;
import static org.apache.accumulo.core.file.rfile.RFileTest.newKey;
import static org.apache.accumulo.core.file.rfile.RFileTest.newValue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths provided by test")
public class GenerateSplitsTest {
private static final Logger log = LoggerFactory.getLogger(GenerateSplitsTest.class);
// JUnit-managed scratch directory for the RFile and the splits output file
@TempDir
private static File tempDir;
// shared RFile fixture written once in createFile() and read by every test
private static final RFileTest.TestRFile trf = new RFileTest.TestRFile(null);
private static String rfilePath;
private static String splitsFilePath;
/**
 * Creates a test file with 84 bytes of data and 2 Locality groups.
 */
@BeforeAll
public static void createFile() throws IOException {
trf.openWriter(false);
trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
trf.writer.append(newKey("r1", "cf1", "cq1", "L1", 55), newValue("foo1"));
trf.writer.append(newKey("r2", "cf2", "cq1", "L1", 55), newValue("foo2"));
trf.writer.append(newKey("r3", "cf2", "cq1", "L1", 55), newValue("foo3"));
trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));
trf.writer.append(newKey("r4", "cf3", "cq1", "L1", 55), newValue("foo4"));
trf.writer.append(newKey("r5", "cf4", "cq1", "L1", 55), newValue("foo5"));
trf.writer.append(newKey("r6", "cf4", "cq1", "L1", 55), newValue("foo6"));
trf.closeWriter();
// persist the in-memory RFile bytes to disk so GenerateSplits can open it by path
File file = new File(tempDir, "testGenerateSplits.rf");
assertTrue(file.createNewFile(), "Failed to create file: " + file);
try (var fileOutputStream = new FileOutputStream(file)) {
fileOutputStream.write(trf.baos.toByteArray());
}
rfilePath = "file:" + file.getAbsolutePath();
log.info("Wrote to file {}", rfilePath);
File splitsFile = new File(tempDir, "testSplitsFile");
assertTrue(splitsFile.createNewFile(), "Failed to create file: " + splitsFile);
splitsFilePath = splitsFile.getAbsolutePath();
}
// --num mode: the requested split count determines which rows are emitted
@Test
public void testNum() throws Exception {
List<String> args = List.of(rfilePath, "--num", "2", "-sf", splitsFilePath);
log.info("Invoking GenerateSplits with {}", args);
GenerateSplits.main(args.toArray(new String[0]));
verifySplitsFile("r3", "r6");
// test more splits requested than indices
args = List.of(rfilePath, "--num", "4", "-sf", splitsFilePath);
log.info("Invoking GenerateSplits with {}", args);
GenerateSplits.main(args.toArray(new String[0]));
verifySplitsFile("r2", "r3", "r4", "r5");
}
// -ss mode: split every 21 bytes of the 84-byte file, yielding three splits
@Test
public void testSplitSize() throws Exception {
List<String> args = List.of(rfilePath, "-ss", "21", "-sf", splitsFilePath);
log.info("Invoking GenerateSplits with {}", args);
GenerateSplits.main(args.toArray(new String[0]));
verifySplitsFile("r2", "r4", "r6");
}
// asserts the splits file holds exactly the given splits, one per line
private void verifySplitsFile(String... splits) throws IOException {
String splitsFile = Files.readString(Paths.get(splitsFilePath));
assertEquals(splits.length, splitsFile.split("\n").length);
for (String s : splits) {
assertTrue(splitsFile.contains(s), "Did not find " + s + " in: " + splitsFile);
}
}
// invalid invocations: missing file, neither -n nor -ss, both -n and -ss, and empty dirs
@Test
public void testErrors() {
List<String> args = List.of("missingFile.rf", "-n", "2");
log.info("Invoking GenerateSplits with {}", args);
assertThrows(FileNotFoundException.class, () -> main(args.toArray(new String[0])));
List<String> args2 = List.of(rfilePath);
log.info("Invoking GenerateSplits with {}", args2);
var e = assertThrows(IllegalArgumentException.class, () -> main(args2.toArray(new String[0])));
assertTrue(e.getMessage().contains("Required number of splits or"), e.getMessage());
List<String> args3 = List.of(rfilePath, "-n", "2", "-ss", "40");
log.info("Invoking GenerateSplits with {}", args3);
e = assertThrows(IllegalArgumentException.class, () -> main(args3.toArray(new String[0])));
assertTrue(e.getMessage().contains("Requested number of splits and"), e.getMessage());
File dir1 = new File(tempDir, "dir1/");
File dir2 = new File(tempDir, "dir2/");
assertTrue(dir1.mkdir() && dir2.mkdir(), "Failed to make new sub-directories");
List<String> args4 = List.of(dir1.getAbsolutePath(), dir2.getAbsolutePath(), "-n", "2");
log.info("Invoking GenerateSplits with {}", args4);
e = assertThrows(IllegalArgumentException.class, () -> main(args4.toArray(new String[0])));
assertTrue(e.getMessage().contains("No files were found"), e.getMessage());
}
// getEvenlySpacedSplits picks the desired count evenly from the candidate iterator
@Test
public void testEvenlySpaced() {
TreeSet<String> desired = getEvenlySpacedSplits(15, 4, numSplits(15));
assertEquals(4, desired.size());
assertEquals(Set.of("003", "006", "009", "012"), desired);
desired = getEvenlySpacedSplits(15, 10, numSplits(15));
assertEquals(10, desired.size());
assertEquals(Set.of("001", "002", "004", "005", "006", "008", "009", "010", "012", "013"),
desired);
desired = getEvenlySpacedSplits(10, 9, numSplits(10));
assertEquals(9, desired.size());
assertEquals(Set.of("001", "002", "003", "004", "005", "006", "007", "008", "009"), desired);
}
/**
 * Create the requested number of splits. Works up to 3 digits or max 999.
 */
private Iterator<String> numSplits(int num) {
TreeSet<String> splits = new TreeSet<>();
for (int i = 0; i < num; i++) {
splits.add(String.format("%03d", i));
}
return splits.iterator();
}
}
| 9,331 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
import org.apache.accumulo.core.file.rfile.RFile.Reader;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.sample.impl.SamplerFactory;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
public class MultiThreadedRFileTest {

  private static final Logger LOG = LoggerFactory.getLogger(MultiThreadedRFileTest.class);
  private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<>();

  /**
   * Sanity-checks a reader's index: the index entries must be sorted, the first index key must not
   * precede the reader's first row, and the last index key's row must equal the reader's last row.
   *
   * @throws IllegalStateException if any ordering invariant is violated
   */
  private static void checkIndex(Reader reader) throws IOException {
    FileSKVIterator indexIter = reader.getIndex();

    if (indexIter.hasTop()) {
      Key lastKey = new Key(indexIter.getTopKey());

      if (reader.getFirstRow().compareTo(lastKey.getRow()) > 0) {
        throw new IllegalStateException(
            "First key out of order " + reader.getFirstRow() + " " + lastKey);
      }

      indexIter.next();

      while (indexIter.hasTop()) {
        if (lastKey.compareTo(indexIter.getTopKey()) > 0) {
          // message typo fixed: was "Indext out of order"
          throw new IllegalStateException(
              "Index out of order " + lastKey + " " + indexIter.getTopKey());
        }

        lastKey = new Key(indexIter.getTopKey());
        indexIter.next();
      }

      if (!reader.getLastRow().equals(lastKey.getRow())) {
        throw new IllegalStateException(
            "Last key out of order " + reader.getLastRow() + " " + lastKey);
      }
    }
  }

  /**
   * Small harness around an RFile on the local filesystem. Supports writing, reading, and making
   * read-only deep copies; a deep copy shares the underlying file but must never open a writer.
   */
  public static class TestRFile {

    private Configuration conf = new Configuration();
    public RFile.Writer writer;
    private FSDataOutputStream dos;
    private AccumuloConfiguration accumuloConfiguration;
    public Reader reader;
    public SortedKeyValueIterator<Key,Value> iter;
    public File rfile = null;
    public boolean deepCopy = false;

    public TestRFile(AccumuloConfiguration accumuloConfiguration) {
      this.accumuloConfiguration = accumuloConfiguration;
      if (this.accumuloConfiguration == null) {
        this.accumuloConfiguration = DefaultConfiguration.getInstance();
      }
    }

    /** Deletes the backing file, if one was ever created. */
    public void close() throws IOException {
      if (rfile != null) {
        FileSystem fs = FileSystem.newInstance(conf);
        Path path = new Path("file://" + rfile);
        fs.delete(path, false);
      }
    }

    /** Creates a read-only deep copy sharing this instance's file; writer resources are not copied. */
    public TestRFile deepCopy() throws IOException {
      TestRFile copy = new TestRFile(accumuloConfiguration);
      // does not copy any writer resources. This would be for read only.
      copy.reader = reader.deepCopy(null);
      copy.rfile = rfile;
      copy.iter = new ColumnFamilySkippingIterator(copy.reader);
      copy.deepCopy = true;

      checkIndex(copy.reader);
      return copy;
    }

    /**
     * Opens a writer over a fresh temp file (small 1000-byte block sizes force many index blocks).
     *
     * @param startDLG whether to immediately start the default locality group
     */
    public void openWriter(boolean startDLG) throws IOException {
      if (deepCopy) {
        throw new IOException("Cannot open writer on a deep copy");
      }
      if (rfile == null) {
        rfile = File.createTempFile("TestRFile", ".rf");
      }
      FileSystem fs = FileSystem.newInstance(conf);
      Path path = new Path("file://" + rfile);
      dos = fs.create(path, true);
      BCFile.Writer _cbw = new BCFile.Writer(dos, null, "gz", conf,
          CryptoFactoryLoader.getServiceForServer(accumuloConfiguration));

      SamplerConfigurationImpl samplerConfig =
          SamplerConfigurationImpl.newSamplerConfig(accumuloConfiguration);
      Sampler sampler = null;

      if (samplerConfig != null) {
        sampler = SamplerFactory.newSampler(samplerConfig, accumuloConfiguration);
      }

      writer = new RFile.Writer(_cbw, 1000, 1000, samplerConfig, sampler);

      if (startDLG) {
        writer.startDefaultLocalityGroup();
      }
    }

    public void closeWriter() throws IOException {
      if (deepCopy) {
        // message fixed: previously said "Cannot open writer" (copy-paste error)
        throw new IOException("Cannot close writer on a deep copy");
      }
      dos.flush();
      writer.close();
      dos.flush();
      dos.close();
    }

    /** Opens a reader (and a column-family-skipping iterator) over the previously written file. */
    public void openReader() throws IOException {
      FileSystem fs = FileSystem.newInstance(conf);
      Path path = new Path("file://" + rfile);

      AccumuloConfiguration defaultConf = DefaultConfiguration.getInstance();
      // the caches used to obfuscate the multithreaded issues
      CachableBuilder b = new CachableBuilder().fsPath(fs, path).conf(conf)
          .cryptoService(CryptoFactoryLoader.getServiceForServer(defaultConf));
      reader = new RFile.Reader(new CachableBlockFile.Reader(b));
      iter = new ColumnFamilySkippingIterator(reader);

      checkIndex(reader);
    }

    public void closeReader() throws IOException {
      reader.close();
    }
  }

  static Key newKey(String row, String cf, String cq, String cv, long ts) {
    return new Key(row.getBytes(), cf.getBytes(), cq.getBytes(), cv.getBytes(), ts);
  }

  static Value newValue(String val) {
    return new Value(val);
  }

  public AccumuloConfiguration conf = null;

  /**
   * Writes a known data set, then reads it concurrently through many deep copies of a shared
   * reader, collecting and reporting any per-thread failures.
   */
  @SuppressFBWarnings(value = "INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE",
      justification = "information put into error message is safe and used for testing")
  @Test
  public void testMultipleReaders() throws IOException {
    final List<Throwable> threadExceptions = Collections.synchronizedList(new ArrayList<>());
    Map<String,MutableInt> messages = new HashMap<>();
    Map<String,String> stackTrace = new HashMap<>();

    final TestRFile trfBase = new TestRFile(conf);

    writeData(trfBase);

    trfBase.openReader();
    try {
      validate(trfBase);

      final TestRFile trfBaseCopy = trfBase.deepCopy();

      validate(trfBaseCopy);

      // now start up multiple RFile deepcopies
      int maxThreads = 10;
      String name = "MultiThreadedRFileTestThread";
      ThreadPoolExecutor pool = ThreadPools.getServerThreadPools().createThreadPool(maxThreads + 1,
          maxThreads + 1, 5 * 60, SECONDS, name, false);
      try {
        Runnable runnable = () -> {
          try {
            TestRFile trf = trfBase;
            synchronized (trfBaseCopy) {
              trf = trfBaseCopy.deepCopy();
            }
            validate(trf);
          } catch (Throwable t) {
            threadExceptions.add(t);
          }
        };
        for (int i = 0; i < maxThreads; i++) {
          pool.execute(runnable);
        }
      } finally {
        pool.shutdown();
        try {
          pool.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        } catch (InterruptedException e) {
          // restore the interrupt status instead of silently swallowing it
          Thread.currentThread().interrupt();
          e.printStackTrace();
        }
      }

      // aggregate identical failures so each distinct message is logged once with a count
      for (Throwable t : threadExceptions) {
        String msg = t.getClass() + " : " + t.getMessage();
        messages.computeIfAbsent(msg, k -> new MutableInt()).increment();
        StringWriter string = new StringWriter();
        PrintWriter writer = new PrintWriter(string);
        t.printStackTrace(writer);
        writer.flush();
        stackTrace.put(msg, string.getBuffer().toString());
      }
    } finally {
      trfBase.closeReader();
      trfBase.close();
    }

    for (String message : messages.keySet()) {
      LOG.error("{}: {}", messages.get(message), message);
      LOG.error("{}", stackTrace.get(message));
    }

    assertTrue(threadExceptions.isEmpty());
  }

  /**
   * Reads the whole file through trf's iterator and checks every expected key/value: first via ten
   * randomly chosen row-partition range seeks, then via one full scan over all partitions.
   */
  private void validate(TestRFile trf) throws IOException {
    RANDOM.get().ints(10, 0, 4).forEach(part -> {
      try {
        Range range = new Range(getKey(part, 0, 0), true, getKey(part, 4, 2048), true);
        trf.iter.seek(range, EMPTY_COL_FAMS, false);

        Key last = null;
        for (int locality = 0; locality < 4; locality++) {
          for (int i = 0; i < 2048; i++) {
            Key key = getKey(part, locality, i);
            Value value = getValue(i);
            assertTrue(trf.iter.hasTop(),
                "No record found for row " + part + " locality " + locality + " index " + i);
            assertEquals(key, trf.iter.getTopKey(),
                "Invalid key found for row " + part + " locality " + locality + " index " + i);
            assertEquals(value, trf.iter.getTopValue(),
                "Invalid value found for row " + part + " locality " + locality + " index " + i);
            last = trf.iter.getTopKey();
            trf.iter.next();
          }
        }
        if (trf.iter.hasTop()) {
          assertFalse(trf.iter.hasTop(),
              "Found " + trf.iter.getTopKey() + " after " + last + " in " + range);
        }

        // a range entirely past the data must yield nothing
        range = new Range(getKey(4, 4, 0), true, null, true);
        trf.iter.seek(range, EMPTY_COL_FAMS, false);
        if (trf.iter.hasTop()) {
          assertFalse(trf.iter.hasTop(), "Found " + trf.iter.getTopKey() + " in " + range);
        }
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    });

    Range range = new Range((Key) null, null);
    trf.iter.seek(range, EMPTY_COL_FAMS, false);

    Key last = null;
    for (int part = 0; part < 4; part++) {
      for (int locality = 0; locality < 4; locality++) {
        for (int i = 0; i < 2048; i++) {
          Key key = getKey(part, locality, i);
          Value value = getValue(i);
          assertTrue(trf.iter.hasTop(),
              "No record found for row " + part + " locality " + locality + " index " + i);
          assertEquals(key, trf.iter.getTopKey(),
              "Invalid key found for row " + part + " locality " + locality + " index " + i);
          assertEquals(value, trf.iter.getTopValue(),
              "Invalid value found for row " + part + " locality " + locality + " index " + i);
          last = trf.iter.getTopKey();
          trf.iter.next();
        }
      }
    }

    if (trf.iter.hasTop()) {
      assertFalse(trf.iter.hasTop(),
          "Found " + trf.iter.getTopKey() + " after " + last + " in " + range);
    }
  }

  /**
   * Writes 4 partitions x 2048 entries into each of three named locality groups (cf1..cf3) plus
   * the default locality group (cf0).
   */
  private void writeData(TestRFile trfBase) throws IOException {
    trfBase.openWriter(false);
    try {
      for (int locality = 1; locality < 4; locality++) {
        trfBase.writer.startNewLocalityGroup("locality" + locality,
            Collections.singleton(new ArrayByteSequence(getCf(locality))));
        for (int part = 0; part < 4; part++) {
          for (int i = 0; i < 2048; i++) {
            trfBase.writer.append(getKey(part, locality, i), getValue(i));
          }
        }
      }

      trfBase.writer.startDefaultLocalityGroup();
      for (int part = 0; part < 4; part++) {
        for (int i = 0; i < 2048; i++) {
          trfBase.writer.append(getKey(part, 0, i), getValue(i));
        }
      }
    } finally {
      trfBase.closeWriter();
    }
  }

  private Key getKey(int part, int locality, int index) {
    String row = "r000" + part;
    String cf = getCf(locality);
    String cq = "cq" + pad(index);
    return newKey(row, cf, cq, "", 1);
  }

  /**
   * Zero-pads a non-negative value to at least four digits (wider values pass through unchanged);
   * callers only pass indexes in [0, 2048).
   */
  private String pad(int val) {
    return String.format("%04d", val);
  }

  private Value getValue(int index) {
    return newValue("" + index);
  }

  private String getCf(int locality) {
    return "cf" + locality;
  }
}
| 9,332 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/KeyShortenerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.accumulo.core.data.Key;
import org.junit.jupiter.api.Test;
import com.google.common.primitives.Bytes;
public class KeyShortenerTest {

  // empty byte array, used for empty column visibility / omitted key fields
  private static final byte[] E = new byte[0];
  private static final byte[] FF = {(byte) 0xff};

  /** Asserts that the shortened key sorts strictly between the previous and current keys. */
  private void assertBetween(Key p, Key s, Key c) {
    assertTrue(p.compareTo(s) < 0);
    assertTrue(s.compareTo(c) < 0);
  }

  /** Shortens prev/current, checks ordering, and checks the exact expected shortened key. */
  private void testKeys(Key prev, Key current, Key expected) {
    Key sk = KeyShortener.shorten(prev, current);
    assertBetween(prev, sk, current);
    assertEquals(expected, sk);
  }

  /**
   * append 0xff to end of string
   */
  private byte[] appendFF(String s) { // renamed from misspelled "apendFF"
    return Bytes.concat(s.getBytes(), FF);
  }

  /**
   * append 0x00 to end of string
   */
  private byte[] append00(String s) {
    return Bytes.concat(s.getBytes(), new byte[] {(byte) 0x00});
  }

  /** Converts a String or byte[] argument to bytes; any other type is a programming error. */
  private byte[] toBytes(Object o) {
    if (o instanceof String) {
      return ((String) o).getBytes();
    } else if (o instanceof byte[]) {
      return (byte[]) o;
    }

    throw new IllegalArgumentException();
  }

  private Key newKey(Object row, Object fam, Object qual, long ts) {
    return new Key(toBytes(row), toBytes(fam), toBytes(qual), E, ts);
  }

  @Test
  public void testOneCharacterDifference() {
    // row has char that differs by one byte
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hbhahaha", "f89222", "q90232e"), newKey(appendFF("r321ha"), E, E, 0));

    // family has char that differs by one byte
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahaha", "f89322", "q90232e"),
        newKey("r321hahahaha", appendFF("f892"), E, 0));

    // qualifier has char that differs by one byte
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahaha", "f89222", "q91232e"),
        newKey("r321hahahaha", "f89222", appendFF("q90"), 0));
  }

  @Test
  public void testMultiCharacterDifference() {
    // row has char that differs by two bytes
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hchahaha", "f89222", "q90232e"), newKey("r321hb", E, E, 0));

    // family has char that differs by two bytes
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahaha", "f89422", "q90232e"), newKey("r321hahahaha", "f893", E, 0));

    // qualifier has char that differs by two bytes
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahaha", "f89222", "q92232e"), newKey("r321hahahaha", "f89222", "q91", 0));
  }

  @Test
  public void testOneCharacterDifferenceAndFF() {
    byte[] ff1 = Bytes.concat(appendFF("mop"), "b".getBytes());
    byte[] ff2 = Bytes.concat(appendFF("mop"), FF, "b".getBytes());

    // trailing 0xff bytes cannot be extended, so the shortener must roll the last byte over
    String eff1 = "moq";

    testKeys(newKey(ff1, "f89222", "q90232e", 34), new Key("mor56", "f89222", "q90232e"),
        newKey(eff1, E, E, 0));
    testKeys(newKey("r1", ff1, "q90232e", 34), new Key("r1", "mor56", "q90232e"),
        newKey("r1", eff1, E, 0));
    testKeys(newKey("r1", "f1", ff1, 34), new Key("r1", "f1", "mor56"),
        newKey("r1", "f1", eff1, 0));

    testKeys(newKey(ff2, "f89222", "q90232e", 34), new Key("mor56", "f89222", "q90232e"),
        newKey(eff1, E, E, 0));
    testKeys(newKey("r1", ff2, "q90232e", 34), new Key("r1", "mor56", "q90232e"),
        newKey("r1", eff1, E, 0));
    testKeys(newKey("r1", "f1", ff2, 34), new Key("r1", "f1", "mor56"),
        newKey("r1", "f1", eff1, 0));
  }

  @Test
  public void testOneCharacterDifferenceAtEnd() {
    // when only the final byte differs by one, a 0x00 byte is appended instead of truncating
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahahb", "f89222", "q90232e"), newKey(append00("r321hahahaha"), E, E, 0));
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahaha", "f89223", "q90232e"),
        newKey("r321hahahaha", append00("f89222"), E, 0));
    testKeys(new Key("r321hahahaha", "f89222", "q90232e"),
        new Key("r321hahahaha", "f89222", "q90232f"),
        newKey("r321hahahaha", "f89222", append00("q90232e"), 0));
  }

  @Test
  public void testSamePrefix() {
    // when prev is a strict prefix of current, shorten by appending a single 0x00 byte
    testKeys(new Key("r3boot4", "f89222", "q90232e"), new Key("r3boot452", "f89222", "q90232e"),
        newKey(append00("r3boot4"), E, E, 0));
    testKeys(new Key("r3boot4", "f892", "q90232e"), new Key("r3boot4", "f89222", "q90232e"),
        newKey("r3boot4", append00("f892"), E, 0));
    testKeys(new Key("r3boot4", "f89222", "q902"), new Key("r3boot4", "f89222", "q90232e"),
        newKey("r3boot4", "f89222", append00("q902"), 0));
  }

  @Test
  public void testSamePrefixAnd00() {
    // when current is prev plus a single trailing 0x00, no shorter key can fit between them,
    // so shorten() must fall back to returning prev itself
    Key prev = new Key("r3boot4", "f89222", "q90232e");
    assertEquals(prev,
        KeyShortener.shorten(prev, newKey(append00("r3boot4"), "f89222", "q90232e", 8)));

    prev = new Key("r3boot4", "f892", "q90232e");
    assertEquals(prev,
        KeyShortener.shorten(prev, newKey("r3boot4", append00("f892"), "q90232e", 8)));

    prev = new Key("r3boot4", "f89222", "q902");
    assertEquals(prev,
        KeyShortener.shorten(prev, newKey("r3boot4", "f89222", append00("q902"), 8)));
  }

  @Test
  public void testSanityCheck1() {
    // sanityCheck must reject any shortened key that is not strictly between prev and current,
    // falling back to prev in every such case

    // prev and shortened equal
    Key prev = new Key("r001", "f002", "q006");
    assertEquals(prev, KeyShortener.sanityCheck(prev, new Key("r002", "f002", "q006"),
        new Key("r001", "f002", "q006")));

    // prev > shortened equal
    assertEquals(prev, KeyShortener.sanityCheck(prev, new Key("r003", "f002", "q006"),
        new Key("r001", "f002", "q006")));

    // current and shortened equal
    assertEquals(prev, KeyShortener.sanityCheck(prev, new Key("r003", "f002", "q006"),
        new Key("r003", "f002", "q006")));

    // shortened > current
    assertEquals(prev, KeyShortener.sanityCheck(prev, new Key("r003", "f002", "q006"),
        new Key("r004", "f002", "q006")));
  }
}
| 9,333 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.client.sample.RowSampler;
import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.crypto.CryptoTest;
import org.apache.accumulo.core.crypto.CryptoTest.ConfigMode;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheConfiguration;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheManagerFactory;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheManager;
import org.apache.accumulo.core.file.blockfile.impl.BasicCacheProvider;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
import org.apache.accumulo.core.file.rfile.RFile.Reader;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.sample.impl.SamplerFactory;
import org.apache.accumulo.core.spi.cache.BlockCacheManager;
import org.apache.accumulo.core.spi.cache.CacheType;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.primitives.Bytes;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
public class RFileTest extends AbstractRFileTest {
/**
 * Minimal {@link IteratorEnvironment} stub for sampling tests: sampling is reported as enabled
 * exactly when a non-null sampler configuration was supplied at construction.
 */
public static class SampleIE implements IteratorEnvironment {

  private final SamplerConfiguration config;

  SampleIE(SamplerConfiguration samplerConfiguration) {
    this.config = samplerConfiguration;
  }

  @Override
  public boolean isSamplingEnabled() {
    return config != null;
  }

  @Override
  public SamplerConfiguration getSamplerConfiguration() {
    return config;
  }
}
private static final Configuration hadoopConf = new Configuration();
@TempDir
private static File tempDir;
@BeforeAll
public static void setupCryptoKeyFile() throws Exception {
  // create the crypto key files that encryption-related tests in this class read
  CryptoTest.setupKeyFiles(RFileTest.class);
}
/**
 * In-memory input stream implementing Hadoop's {@link Seekable} and {@link PositionedReadable} so
 * an RFile can be read back from a byte array. Positioned reads that fall outside the buffer, or
 * that do not fit the destination, throw {@link IllegalArgumentException} (test-only contract).
 */
static class SeekableByteArrayInputStream extends ByteArrayInputStream
    implements Seekable, PositionedReadable {

  public SeekableByteArrayInputStream(byte[] buf) {
    super(buf);
  }

  @Override
  public long getPos() {
    return pos;
  }

  @Override
  public void seek(long pos) throws IOException {
    // seeking is only supported when no mark has been set (reset() would jump to the mark)
    if (mark != 0) {
      throw new IllegalStateException();
    }

    reset();
    if (skip(pos) != pos) {
      throw new IOException();
    }
  }

  @Override
  public boolean seekToNewSource(long targetPos) {
    return false;
  }

  @Override
  public int read(long position, byte[] buffer, int offset, int length) {
    // reject reads that start or end outside the backing array, or overflow the destination
    boolean badRequest = position >= buf.length || position + length > buf.length
        || length > buffer.length;
    if (badRequest) {
      throw new IllegalArgumentException();
    }

    System.arraycopy(buf, (int) position, buffer, offset, length);
    return length;
  }

  @Override
  public void readFully(long position, byte[] buffer) {
    read(position, buffer, 0, buffer.length);
  }

  @Override
  public void readFully(long position, byte[] buffer, int offset, int length) {
    read(position, buffer, offset, length);
  }
}
/** Builds a Key from string row/family/qualifier/visibility plus a timestamp. */
static Key newKey(String row, String cf, String cq, String cv, long ts) {
  byte[] rowBytes = row.getBytes();
  byte[] famBytes = cf.getBytes();
  byte[] qualBytes = cq.getBytes();
  byte[] visBytes = cv.getBytes();
  return new Key(rowBytes, famBytes, qualBytes, visBytes, ts);
}
/** Wraps the given string in a {@link Value}. */
static Value newValue(String val) {
  return new Value(val);
}
/** Returns prefix followed by the six-digit zero-padded value, e.g. ("r_", 7) -> "r_000007". */
static String formatString(String prefix, int i) {
  String pattern = prefix + "%06d";
  return String.format(pattern, i);
}
@Test
public void test1() throws IOException {
  // test an empty file
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();
  trf.closeWriter();

  trf.openReader();
  // a full-range seek over an empty file must yield no entries
  trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
  assertFalse(trf.iter.hasTop());

  // an empty file has no last row
  assertNull(trf.reader.getLastRow());

  trf.closeReader();
}
@Test
public void test2() throws IOException {
  // test an rfile with one entry
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();
  trf.writer.append(newKey("r1", "cf1", "cq1", "L1", 55), newValue("foo"));
  trf.closeWriter();

  trf.openReader();
  // seek before everything
  trf.seek(null);
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("r1", "cf1", "cq1", "L1", 55));
  assertEquals(trf.iter.getTopValue(), newValue("foo"));
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // seek after the key
  trf.seek(newKey("r2", "cf1", "cq1", "L1", 55));
  assertFalse(trf.iter.hasTop());

  // seek exactly to the key
  trf.seek(newKey("r1", "cf1", "cq1", "L1", 55));
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("r1", "cf1", "cq1", "L1", 55));
  assertEquals(trf.iter.getTopValue(), newValue("foo"));
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  assertEquals(new Text("r1"), trf.reader.getLastRow());

  trf.closeReader();
}
@Test
public void test3() throws IOException {
  // test an rfile with multiple rows having multiple columns

  TestRFile trf = new TestRFile(conf);

  trf.openWriter();

  int val = 0;

  ArrayList<Key> expectedKeys = new ArrayList<>(10000);
  ArrayList<Value> expectedValues = new ArrayList<>(10000);

  // write 4 rows x 4 families x 4 qualifiers x 4 visibilities x 4 timestamps, each key appended
  // twice: once with the delete flag set and once without (delete sorts first for equal fields)
  for (int row = 0; row < 4; row++) {
    String rowS = formatString("r_", row);
    for (int cf = 0; cf < 4; cf++) {
      String cfS = formatString("cf_", cf);
      for (int cq = 0; cq < 4; cq++) {
        String cqS = formatString("cq_", cq);
        for (int cv = 'A'; cv < 'A' + 4; cv++) {
          String cvS = "" + (char) cv;
          for (int ts = 4; ts > 0; ts--) {
            Key k = newKey(rowS, cfS, cqS, cvS, ts);
            // check below ensures when all key sizes are same more than one index block is
            // created
            assertEquals(27, k.getSize());
            k.setDeleted(true);
            Value v = newValue("" + val);
            trf.writer.append(k, v);
            expectedKeys.add(k);
            expectedValues.add(v);

            k = newKey(rowS, cfS, cqS, cvS, ts);
            assertEquals(27, k.getSize());
            v = newValue("" + val);
            trf.writer.append(k, v);
            expectedKeys.add(k);
            expectedValues.add(v);

            val++;
          }
        }
      }
    }
  }

  // trf.writer.append(newKey("r1","cf1","cq1","L1", 55), newValue("foo"));
  trf.closeWriter();

  trf.openReader();
  // seek before everything
  trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
  verify(trf, expectedKeys.iterator(), expectedValues.iterator());

  // seek to the middle
  int index = expectedKeys.size() / 2;
  trf.seek(expectedKeys.get(index));
  verify(trf, expectedKeys.subList(index, expectedKeys.size()).iterator(),
      expectedValues.subList(index, expectedKeys.size()).iterator());

  // seek the first key
  index = 0;
  trf.seek(expectedKeys.get(index));
  verify(trf, expectedKeys.subList(index, expectedKeys.size()).iterator(),
      expectedValues.subList(index, expectedKeys.size()).iterator());

  // seek to the last key
  index = expectedKeys.size() - 1;
  trf.seek(expectedKeys.get(index));
  verify(trf, expectedKeys.subList(index, expectedKeys.size()).iterator(),
      expectedValues.subList(index, expectedKeys.size()).iterator());

  // seek after everything
  index = expectedKeys.size();
  trf.seek(new Key(new Text("z")));
  verify(trf, expectedKeys.subList(index, expectedKeys.size()).iterator(),
      expectedValues.subList(index, expectedKeys.size()).iterator());

  // test seeking to the current location
  index = expectedKeys.size() / 2;
  trf.seek(expectedKeys.get(index));
  assertTrue(trf.iter.hasTop());
  assertEquals(expectedKeys.get(index), trf.iter.getTopKey());
  assertEquals(expectedValues.get(index), trf.iter.getTopValue());

  trf.iter.next();
  index++;
  assertTrue(trf.iter.hasTop());
  assertEquals(expectedKeys.get(index), trf.iter.getTopKey());
  assertEquals(expectedValues.get(index), trf.iter.getTopValue());

  trf.seek(expectedKeys.get(index));

  assertTrue(trf.iter.hasTop());
  assertEquals(expectedKeys.get(index), trf.iter.getTopKey());
  assertEquals(expectedValues.get(index), trf.iter.getTopValue());

  // test seeking to each location in the file
  index = 0;
  for (Key key : expectedKeys) {
    trf.seek(key);
    assertTrue(trf.iter.hasTop());
    assertEquals(key, trf.iter.getTopKey());
    assertEquals(expectedValues.get(index), trf.iter.getTopValue());

    if (index > 0) {
      // Key pkey =
      expectedKeys.get(index - 1);
      // assertEquals(pkey, trf.reader.getPrevKey());
    }

    index++;
  }

  // test seeking backwards to each key
  for (int i = expectedKeys.size() - 1; i >= 0; i--) {
    Key key = expectedKeys.get(i);

    trf.seek(key);
    assertTrue(trf.iter.hasTop());
    assertEquals(key, trf.iter.getTopKey());
    assertEquals(expectedValues.get(i), trf.iter.getTopValue());

    if (i - 1 > 0) {
      // Key pkey =
      expectedKeys.get(i - 1);
      // assertEquals(pkey, trf.reader.getPrevKey());
    }
  }

  assertEquals(expectedKeys.get(expectedKeys.size() - 1).getRow(), trf.reader.getLastRow());

  // test seeking to random location and reading all data from that point
  // there was an off by one bug with this in the transient index
  for (int i = 0; i < 12; i++) {
    index = RANDOM.get().nextInt(expectedKeys.size());
    trf.seek(expectedKeys.get(index));
    for (; index < expectedKeys.size(); index++) {
      assertTrue(trf.iter.hasTop());
      assertEquals(expectedKeys.get(index), trf.iter.getTopKey());
      assertEquals(expectedValues.get(index), trf.iter.getTopValue());
      trf.iter.next();
    }
  }

  // count the number of index entries
  FileSKVIterator iiter = trf.reader.getIndex();
  int count = 0;
  while (iiter.hasTop()) {
    count++;
    iiter.next();
  }
  assertEquals(20, count);

  trf.closeReader();
}
@Test
public void test4() throws IOException {
  // verify the writer rejects keys appended out of sorted order
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();
  RFile.Writer writer = trf.writer;
  final Value foo1 = newValue("foo1");
  final long ts = 55L;
  writer.append(newKey("r1", "cf1", "cq1", "L1", ts), foo1);

  // each key below sorts before the key already appended (earlier row, family, qualifier,
  // visibility, or a newer timestamp — timestamps sort descending)
  // @formatter:off
  final List<Key> badKeys = List.of(
      newKey("r0", "cf1", "cq1", "L1", ts),
      newKey("r1", "cf0", "cq1", "L1", ts),
      newKey("r1", "cf1", "cq0", "L1", ts),
      newKey("r1", "cf1", "cq1", "L0", ts),
      newKey("r1", "cf1", "cq1", "L1", ts + 1)
  );
  // @formatter:on
  badKeys.forEach(
      key -> assertThrows(IllegalArgumentException.class, () -> writer.append(key, foo1)));
}
@Test
public void test5() throws IOException {
  // verify seeks that land between two stored keys
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();
  trf.writer.append(newKey("r1", "cf1", "cq1", "L1", 55), newValue("foo1"));
  trf.writer.append(newKey("r1", "cf1", "cq4", "L1", 56), newValue("foo2"));
  trf.closeWriter();

  trf.openReader();

  // test seeking between keys
  trf.seek(newKey("r1", "cf1", "cq3", "L1", 55));
  assertTrue(trf.iter.hasTop());
  assertEquals(newKey("r1", "cf1", "cq4", "L1", 56), trf.iter.getTopKey());
  assertEquals(newValue("foo2"), trf.iter.getTopValue());

  // test seeking right before previous seek
  trf.seek(newKey("r1", "cf1", "cq0", "L1", 55));
  assertTrue(trf.iter.hasTop());
  assertEquals(newKey("r1", "cf1", "cq1", "L1", 55), trf.iter.getTopKey());
  assertEquals(newValue("foo1"), trf.iter.getTopValue());

  assertEquals(new Text("r1"), trf.reader.getLastRow());

  trf.closeReader();
}
@Test
public void test6() throws IOException {
  // verify repeated seeks before the first key and after the last key
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();
  for (int i = 0; i < 500; i++) {
    trf.writer.append(newKey(formatString("r_", i), "cf1", "cq1", "L1", 55), newValue("foo1"));
  }

  trf.closeWriter();

  trf.openReader();

  // repeatedly seek to locations before the first key in the file
  for (int i = 0; i < 10; i++) {
    trf.seek(newKey(formatString("q_", i), "cf1", "cq1", "L1", 55));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), "cf1", "cq1", "L1", 55), trf.iter.getTopKey());
    assertEquals(newValue("foo1"), trf.iter.getTopValue());
  }

  // repeatedly seek to locations after the last key in the file
  for (int i = 0; i < 10; i++) {
    trf.seek(newKey(formatString("s_", i), "cf1", "cq1", "L1", 55));
    assertFalse(trf.iter.hasTop());
  }

  assertEquals(new Text(formatString("r_", 499)), trf.reader.getLastRow());

  trf.closeReader();
}
@Test
public void test7() throws IOException {
  // these tests exercise setting the end key of a range
  // (file holds rows r_000002 .. r_000049)
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();
  for (int i = 2; i < 50; i++) {
    trf.writer.append(newKey(formatString("r_", i), "cf1", "cq1", "L1", 55), newValue("foo" + i));
  }

  trf.closeWriter();

  trf.openReader();

  // test that has top returns false when end of range reached
  trf.iter.seek(new Range(newKey(formatString("r_", 3), "cf1", "cq1", "L1", 55), true,
      newKey(formatString("r_", 4), "cf1", "cq1", "L1", 55), false), EMPTY_COL_FAMS, false);
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey(formatString("r_", 3), "cf1", "cq1", "L1", 55));
  assertEquals(newValue("foo" + 3), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // test seeking to a range that is between two keys, should not return anything
  trf.iter.seek(
      new Range(newKey(formatString("r_", 4) + "a", "cf1", "cq1", "L1", 55), true,
          newKey(formatString("r_", 4) + "b", "cf1", "cq1", "L1", 55), true),
      EMPTY_COL_FAMS, false);
  assertFalse(trf.iter.hasTop());

  // test seeking to another range after the previously seeked range, that is between the same two
  // keys in the file
  // as the previously seeked range.... this test an optimization on RFile
  trf.iter.seek(
      new Range(newKey(formatString("r_", 4) + "c", "cf1", "cq1", "L1", 55), true,
          newKey(formatString("r_", 4) + "d", "cf1", "cq1", "L1", 55), true),
      EMPTY_COL_FAMS, false);
  assertFalse(trf.iter.hasTop());

  trf.iter.seek(
      new Range(newKey(formatString("r_", 4) + "e", "cf1", "cq1", "L1", 55), true,
          newKey(formatString("r_", 4) + "f", "cf1", "cq1", "L1", 55), true),
      EMPTY_COL_FAMS, false);
  assertFalse(trf.iter.hasTop());

  // now ensure we can seek somewhere, that triggering the optimization does not cause any
  // problems
  trf.iter.seek(new Range(newKey(formatString("r_", 5), "cf1", "cq1", "L1", 55), true,
      newKey(formatString("r_", 6), "cf1", "cq1", "L1", 55), false), EMPTY_COL_FAMS, false);
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey(formatString("r_", 5), "cf1", "cq1", "L1", 55));
  assertEquals(newValue("foo" + 5), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // test seeking to range that is before the beginning of the file
  trf.iter.seek(new Range(newKey(formatString("r_", 0), "cf1", "cq1", "L1", 55), true,
      newKey(formatString("r_", 2), "cf1", "cq1", "L1", 55), false), EMPTY_COL_FAMS, false);
  assertFalse(trf.iter.hasTop());

  assertEquals(new Text(formatString("r_", 49)), trf.reader.getLastRow());

  trf.reader.close();
}
@Test
public void test8() throws IOException {
  // Exercises followingKey() seeks at ROW, ROW_COLFAM, and ROW_COLFAM_COLQUAL
  // granularity, scanning forward, by twos, and backwards over 2500 keys.
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();

  for (int i = 0; i < 2500; i++) {
    trf.writer.append(newKey(formatString("r_", i), "cf1", "cq1", "L1", 42), newValue("foo" + i));
  }

  trf.closeWriter();
  trf.openReader();

  // test seeking between each key forward
  for (int i = 0; i < 2499; i++) {
    trf.seek(newKey(formatString("r_", i), "cf1", "cq1", "L1", 42).followingKey(PartialKey.ROW));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", i + 1), "cf1", "cq1", "L1", 42), trf.iter.getTopKey());
  }

  // test seeking between every other key forward
  for (int i = 0; i < 2499; i += 2) {
    trf.seek(newKey(formatString("r_", i), "cf1", "cq1", "L1", 42).followingKey(PartialKey.ROW));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", i + 1), "cf1", "cq1", "L1", 42), trf.iter.getTopKey());
  }

  // test seeking backwards between each key
  for (int i = 2498; i >= 0; i--) {
    trf.seek(newKey(formatString("r_", i), "cf1", "cq1", "L1", 42).followingKey(PartialKey.ROW));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", i + 1), "cf1", "cq1", "L1", 42), trf.iter.getTopKey());
  }

  trf.closeReader();

  // do same test with col fam
  trf = new TestRFile(conf);

  trf.openWriter();

  for (int i = 0; i < 2500; i++) {
    trf.writer.append(newKey(formatString("r_", 0), formatString("cf_", i), "cq1", "L1", 42),
        newValue("foo" + i));
  }

  trf.closeWriter();
  trf.openReader();

  // test seeking between each key forward
  for (int i = 0; i < 2499; i++) {
    trf.seek(newKey(formatString("r_", 0), formatString("cf_", i), "cq1", "L1", 42)
        .followingKey(PartialKey.ROW_COLFAM));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), formatString("cf_", i + 1), "cq1", "L1", 42),
        trf.iter.getTopKey());
  }

  // test seeking between every other key forward
  for (int i = 0; i < 2499; i += 2) {
    trf.seek(newKey(formatString("r_", 0), formatString("cf_", i), "cq1", "L1", 42)
        .followingKey(PartialKey.ROW_COLFAM));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), formatString("cf_", i + 1), "cq1", "L1", 42),
        trf.iter.getTopKey());
  }

  // test seeking backwards between each key
  for (int i = 2498; i >= 0; i--) {
    trf.seek(newKey(formatString("r_", 0), formatString("cf_", i), "cq1", "L1", 42)
        .followingKey(PartialKey.ROW_COLFAM));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), formatString("cf_", i + 1), "cq1", "L1", 42),
        trf.iter.getTopKey());
  }

  trf.closeReader();

  // do same test with col qual
  trf = new TestRFile(conf);

  trf.openWriter();

  for (int i = 0; i < 2500; i++) {
    trf.writer.append(
        newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i), "L1", 42),
        newValue("foo" + i));
  }

  trf.closeWriter();
  trf.openReader();

  // test seeking between each key forward
  for (int i = 0; i < 2499; i++) {
    trf.seek(
        newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i), "L1", 42)
            .followingKey(PartialKey.ROW_COLFAM_COLQUAL));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i + 1),
        "L1", 42), trf.iter.getTopKey());
  }

  // test seeking between every other key forward
  for (int i = 0; i < 2499; i += 2) {
    trf.seek(
        newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i), "L1", 42)
            .followingKey(PartialKey.ROW_COLFAM_COLQUAL));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i + 1),
        "L1", 42), trf.iter.getTopKey());
  }

  // test seeking backwards between each key
  for (int i = 2498; i >= 0; i--) {
    trf.seek(
        newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i), "L1", 42)
            .followingKey(PartialKey.ROW_COLFAM_COLQUAL));
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("r_", 0), formatString("cf_", 0), formatString("cq_", i + 1),
        "L1", 42), trf.iter.getTopKey());
  }

  trf.closeReader();
}
/**
 * Wraps each column family name in an {@link ArrayByteSequence} and returns them as a set,
 * suitable for passing to seek().
 */
public static Set<ByteSequence> newColFamByteSequence(String... colFams) {
  HashSet<ByteSequence> families = new HashSet<>();
  for (int i = 0; i < colFams.length; i++) {
    families.add(new ArrayByteSequence(colFams[i]));
  }
  return families;
}
@Test
public void test9() throws IOException {
  // Writes two named locality groups and verifies that seeking with various
  // column-family filters reads only the relevant groups, checking
  // getNumLocalityGroupsSeeked() for each filter combination.
  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);

  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));

  trf.writer.append(newKey("0000", "cf1", "doe,john", "", 4), newValue("1123 West Left st"));
  trf.writer.append(newKey("0002", "cf2", "doe,jane", "", 5), newValue("1124 East Right st"));

  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));

  trf.writer.append(newKey("0001", "cf3", "buck,john", "", 4), newValue("90 Slum st"));
  trf.writer.append(newKey("0003", "cf4", "buck,jane", "", 5), newValue("09 Slum st"));

  trf.writer.close();

  trf.openReader();

  // scan first loc group
  Range r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf1", "cf2"), true);
  assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0000", "cf1", "doe,john", "", 4));
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0002", "cf2", "doe,jane", "", 5));
  assertEquals(newValue("1124 East Right st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan second loc group
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf3", "cf4"), true);
  assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0001", "cf3", "buck,john", "", 4));
  assertEquals(newValue("90 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0003", "cf4", "buck,jane", "", 5));
  assertEquals(newValue("09 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan all loc groups
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, EMPTY_COL_FAMS, false);
  assertEquals(2, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0000", "cf1", "doe,john", "", 4));
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0001", "cf3", "buck,john", "", 4));
  assertEquals(newValue("90 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0002", "cf2", "doe,jane", "", 5));
  assertEquals(newValue("1124 East Right st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0003", "cf4", "buck,jane", "", 5));
  assertEquals(newValue("09 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan no loc groups
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("saint", "dogooder"), true);
  assertEquals(0, trf.reader.getNumLocalityGroupsSeeked());

  assertFalse(trf.iter.hasTop());

  // scan a subset of second locality group
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf4"), true);
  assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0003", "cf4", "buck,jane", "", 5));
  assertEquals(newValue("09 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan a subset of second locality group
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf3"), true);
  assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0001", "cf3", "buck,john", "", 4));
  assertEquals(newValue("90 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan subset of first loc group
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf1"), true);
  assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0000", "cf1", "doe,john", "", 4));
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan subset of first loc group
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf2"), true);
  assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0002", "cf2", "doe,jane", "", 5));
  assertEquals(newValue("1124 East Right st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  // scan subset of all loc groups
  r = new Range(newKey("0000", "cf1", "doe,john", "", 4), true,
      newKey("0003", "cf4", "buck,jane", "", 5), true);
  trf.iter.seek(r, newColFamByteSequence("cf1", "cf4"), true);
  assertEquals(2, trf.reader.getNumLocalityGroupsSeeked());

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0000", "cf1", "doe,john", "", 4));
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0003", "cf4", "buck,jane", "", 5));
  assertEquals(newValue("09 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  trf.closeReader();
}
@Test
public void test10() throws IOException {
  // Verifies files containing empty locality groups: all-empty, empty named
  // groups before/after populated ones, and data only in the default group.

  // test empty locality groups
  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.close();

  trf.openReader();
  trf.iter.seek(new Range(new Text(""), null), EMPTY_COL_FAMS, false);
  assertFalse(trf.iter.hasTop());

  trf.closeReader();

  // another empty locality group test
  trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
  trf.writer.append(newKey("0000", "cf1", "doe,john", "", 4), newValue("1123 West Left st"));
  trf.writer.append(newKey("0002", "cf2", "doe,jane", "", 5), newValue("1124 East Right st"));
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.close();

  trf.openReader();
  trf.iter.seek(new Range(new Text(""), null), EMPTY_COL_FAMS, false);

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0000", "cf1", "doe,john", "", 4));
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0002", "cf2", "doe,jane", "", 5));
  assertEquals(newValue("1124 East Right st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  trf.closeReader();

  // another empty locality group test
  trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));
  trf.writer.append(newKey("0001", "cf3", "buck,john", "", 4), newValue("90 Slum st"));
  trf.writer.append(newKey("0003", "cf4", "buck,jane", "", 5), newValue("09 Slum st"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.close();

  trf.openReader();
  trf.iter.seek(new Range(new Text(""), null), EMPTY_COL_FAMS, false);

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0001", "cf3", "buck,john", "", 4));
  assertEquals(newValue("90 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0003", "cf4", "buck,jane", "", 5));
  assertEquals(newValue("09 Slum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  trf.closeReader();

  // another empty locality group test
  trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.append(newKey("0007", "good citizen", "q,john", "", 4), newValue("70 Apple st"));
  trf.writer.append(newKey("0008", "model citizen", "q,jane", "", 5), newValue("81 Plum st"));
  trf.writer.close();

  trf.openReader();
  trf.iter.seek(new Range(new Text(""), null), EMPTY_COL_FAMS, false);

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0007", "good citizen", "q,john", "", 4));
  assertEquals(newValue("70 Apple st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0008", "model citizen", "q,jane", "", 5));
  assertEquals(newValue("81 Plum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  trf.closeReader();

  // another empty locality group test
  trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
  trf.writer.append(newKey("0000", "cf1", "doe,john", "", 4), newValue("1123 West Left st"));
  trf.writer.append(newKey("0002", "cf2", "doe,jane", "", 5), newValue("1124 East Right st"));
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.append(newKey("0007", "good citizen", "q,john", "", 4), newValue("70 Apple st"));
  trf.writer.append(newKey("0008", "model citizen", "q,jane", "", 5), newValue("81 Plum st"));
  trf.writer.close();

  trf.openReader();
  trf.iter.seek(new Range(new Text(""), null), EMPTY_COL_FAMS, false);

  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0000", "cf1", "doe,john", "", 4));
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0002", "cf2", "doe,jane", "", 5));
  assertEquals(newValue("1124 East Right st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0007", "good citizen", "q,john", "", 4));
  assertEquals(newValue("70 Apple st"), trf.iter.getTopValue());
  trf.iter.next();
  assertTrue(trf.iter.hasTop());
  assertEquals(trf.iter.getTopKey(), newKey("0008", "model citizen", "q,jane", "", 5));
  assertEquals(newValue("81 Plum st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  trf.closeReader();
}
@Test
public void test11() throws IOException {
  // test locality groups with more than two entries

  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);
  // lg1 holds keys whose row index is 3 mod 10
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("3mod10"));
  for (int i = 3; i < 1024; i += 10) {
    trf.writer.append(newKey(formatString("i", i), "3mod10", "", "", i + 2), newValue("" + i));
  }

  // lg2 holds keys whose row index is 5 or 7 mod 10
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("5mod10", "7mod10"));

  for (int i = 5; i < 1024;) {
    trf.writer.append(newKey(formatString("i", i), "5mod10", "", "", i + 2), newValue("" + i));
    i += 2;
    trf.writer.append(newKey(formatString("i", i), "7mod10", "", "", i + 2), newValue("" + i));
    i += 8;
  }

  // everything else goes in the default locality group
  trf.writer.startDefaultLocalityGroup();
  for (int i = 0; i < 1024; i++) {
    int m10 = i % 10;
    if (m10 == 3 || m10 == 5 || m10 == 7) {
      continue;
    }

    trf.writer.append(newKey(formatString("i", i), m10 + "mod10", "", "", i + 2),
        newValue("" + i));
  }

  trf.writer.close();

  // test a merged read of all column families
  trf.openReader();
  trf.iter.seek(new Range(new Text(""), null), EMPTY_COL_FAMS, false);
  assertEquals(3, trf.reader.getNumLocalityGroupsSeeked());
  for (int i = 0; i < 1024; i++) {
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey(formatString("i", i), (i % 10) + "mod10", "", "", i + 2),
        trf.iter.getTopKey());
    assertEquals(newValue("" + i), trf.iter.getTopValue());
    trf.iter.next();
  }

  assertFalse(trf.iter.hasTop());

  // try reading each of the 10 column families separately
  for (int m = 0; m < 10; m++) {
    trf.iter.seek(new Range(new Key(), true, null, true), newColFamByteSequence(m + "mod10"),
        true);
    assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());
    for (int i = m; i < 1024; i += 10) {
      assertTrue(trf.iter.hasTop());
      assertEquals(newKey(formatString("i", i), (i % 10) + "mod10", "", "", i + 2),
          trf.iter.getTopKey());
      assertEquals(newValue("" + i), trf.iter.getTopValue());
      trf.iter.next();
    }

    assertFalse(trf.iter.hasTop());

    // test excluding an individual column family
    trf.iter.seek(new Range(new Key(), true, null, true), newColFamByteSequence(m + "mod10"),
        false);
    // excluding 3mod10 skips lg1 entirely; any other exclusion still touches all 3 groups
    if (m == 3) {
      assertEquals(2, trf.reader.getNumLocalityGroupsSeeked());
    } else {
      assertEquals(3, trf.reader.getNumLocalityGroupsSeeked());
    }
    for (int i = 0; i < 1024; i++) {
      if (i % 10 == m) {
        continue;
      }

      assertTrue(trf.iter.hasTop());
      assertEquals(newKey(formatString("i", i), (i % 10) + "mod10", "", "", i + 2),
          trf.iter.getTopKey());
      assertEquals(newValue("" + i), trf.iter.getTopValue());
      trf.iter.next();
    }

    assertFalse(trf.iter.hasTop());
  }

  // test Rfile deepcopy
  SortedKeyValueIterator<Key,Value> reader2 = trf.iter.deepCopy(null);

  // try reading from cloned reader at the same time as parent reader
  for (int m = 0; m < 9; m++) {
    trf.iter.seek(new Range(new Key(), true, null, true), newColFamByteSequence(m + "mod10"),
        true);
    assertEquals(1, trf.reader.getNumLocalityGroupsSeeked());
    reader2.seek(new Range(new Key(), true, null, true), newColFamByteSequence((m + 1) + "mod10"),
        true);
    // reader2 is a SortedKeyValueIterator, so getNumLocalityGroupsSeeked is not available on it
    for (int i = m; i < 1024; i += 10) {
      assertTrue(trf.iter.hasTop());
      assertEquals(newKey(formatString("i", i), (i % 10) + "mod10", "", "", i + 2),
          trf.iter.getTopKey());
      assertEquals(newValue("" + i), trf.iter.getTopValue());
      trf.iter.next();

      if (i + 1 < 1024) {
        assertTrue(reader2.hasTop());
        assertEquals(newKey(formatString("i", (i + 1)), ((i + 1) % 10) + "mod10", "", "", i + 3),
            reader2.getTopKey());
        assertEquals(newValue("" + (i + 1)), reader2.getTopValue());
        reader2.next();
      }
    }

    assertFalse(trf.iter.hasTop());
    assertFalse(reader2.hasTop());
  }

  trf.closeReader();
}
@Test
public void test12() throws IOException {
  // test inserting column fams not in locality groups

  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("a", "b"));
  trf.writer.append(newKey("0007", "a", "cq1", "", 4), newValue("1"));

  // "c" is not in lg1's declared families, so the append must be rejected
  assertThrows(IllegalArgumentException.class,
      () -> trf.writer.append(newKey("0009", "c", "cq1", "", 4), newValue("1")));

  trf.closeWriter();

  trf.openReader();
  trf.iter.seek(new Range(), EMPTY_COL_FAMS, false);

  // only the accepted key should be present
  assertTrue(trf.iter.hasTop());
  assertEquals(newKey("0007", "a", "cq1", "", 4), trf.iter.getTopKey());
  assertEquals(newValue("1"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());
}
@Test
public void test13() throws IOException {
  // test inserting column fam in default loc group that was in
  // previous locality group

  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("a", "b"));

  final Value valueOf1 = newValue("1");
  trf.writer.append(newKey("0007", "a", "cq1", "", 4), valueOf1);

  trf.writer.startDefaultLocalityGroup();

  // families "a" and "b" already belong to lg1, so the default group rejects them
  assertThrows(IllegalArgumentException.class,
      () -> trf.writer.append(newKey("0008", "a", "cq1", "", 4), valueOf1));

  assertThrows(IllegalArgumentException.class,
      () -> trf.writer.append(newKey("0009", "b", "cq1", "", 4), valueOf1));

  trf.closeWriter();

  trf.openReader();
  trf.iter.seek(new Range(), EMPTY_COL_FAMS, false);

  // only the key written to lg1 should be present
  assertTrue(trf.iter.hasTop());
  assertEquals(newKey("0007", "a", "cq1", "", 4), trf.iter.getTopKey());
  assertEquals(valueOf1, trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());
}
@Test
public void test14() throws IOException {
  // Once the default locality group has been started, starting any further
  // locality group (named or default) must fail with IllegalStateException.
  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startDefaultLocalityGroup();

  Set<ByteSequence> cfs = newColFamByteSequence("a", "b");
  assertThrows(IllegalStateException.class, () -> trf.writer.startNewLocalityGroup("lg1", cfs));
  assertThrows(IllegalStateException.class, trf.writer::startDefaultLocalityGroup);

  trf.writer.close();
}
@Test
public void test16() throws IOException {
  // Starting a new locality group that re-declares a column family ("b")
  // already claimed by an earlier group must fail.
  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);
  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("a", "b"));

  trf.writer.append(newKey("0007", "a", "cq1", "", 4), newValue("1"));
  Set<ByteSequence> columnFamilies = newColFamByteSequence("b", "c");
  assertThrows(IllegalArgumentException.class,
      () -> trf.writer.startNewLocalityGroup("lg1", columnFamilies));
  trf.closeWriter();
}
@Test
public void test17() throws IOException {
  // add alot of the same keys to rfile that cover multiple blocks...
  // this should cause the keys in the index to be exactly the same...
  // ensure seeks work correctly
  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);

  trf.writer.startDefaultLocalityGroup();
  for (int i = 0; i < 2048; i++) {
    trf.writer.append(newKey("r0000", "cf1", "cq1", "", 1), newValue("" + i));
  }

  for (int i = 2048; i < 4096; i++) {
    trf.writer.append(newKey("r0001", "cf1", "cq1", "", 1), newValue("" + i));
  }

  trf.writer.close();

  trf.openReader();

  // confirm the data actually spans multiple index blocks
  FileSKVIterator indexIter = trf.reader.getIndex();
  int count = 0;
  while (indexIter.hasTop()) {
    count++;
    indexIter.next();
  }

  assertTrue(count > 4);

  // scan only the duplicated r0000 keys (end key exclusive)
  trf.iter.seek(new Range(newKey("r0000", "cf1", "cq1", "", 1), true,
      newKey("r0001", "cf1", "cq1", "", 1), false), EMPTY_COL_FAMS, false);

  for (int i = 0; i < 2048; i++) {
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey("r0000", "cf1", "cq1", "", 1), trf.iter.getTopKey());
    assertEquals(newValue("" + i), trf.iter.getTopValue());
    trf.iter.next();
  }

  assertFalse(trf.iter.hasTop());

  // scan only the duplicated r0001 keys (start key exclusive)
  trf.iter.seek(new Range(newKey("r0000", "cf1", "cq1", "", 1), false,
      newKey("r0001", "cf1", "cq1", "", 1), true), EMPTY_COL_FAMS, false);

  for (int i = 2048; i < 4096; i++) {
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey("r0001", "cf1", "cq1", "", 1), trf.iter.getTopKey());
    assertEquals(newValue("" + i), trf.iter.getTopValue());
    trf.iter.next();
  }

  assertFalse(trf.iter.hasTop());

  // single-key inclusive range still returns every duplicate
  trf.iter.seek(new Range(newKey("r0001", "cf1", "cq1", "", 1), true,
      newKey("r0001", "cf1", "cq1", "", 1), true), EMPTY_COL_FAMS, false);

  for (int i = 2048; i < 4096; i++) {
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey("r0001", "cf1", "cq1", "", 1), trf.iter.getTopKey());
    assertEquals(newValue("" + i), trf.iter.getTopValue());
    trf.iter.next();
  }

  assertFalse(trf.iter.hasTop());

  // a range past all data returns nothing
  trf.iter.seek(new Range(newKey("r0002", "cf1", "cq1", "", 1), true,
      newKey("r0002", "cf1", "cq1", "", 1), true), EMPTY_COL_FAMS, false);

  assertFalse(trf.iter.hasTop());

  // an infinite range returns everything in order
  trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);

  for (int i = 0; i < 2048; i++) {
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey("r0000", "cf1", "cq1", "", 1), trf.iter.getTopKey());
    assertEquals(newValue("" + i), trf.iter.getTopValue());
    trf.iter.next();
  }

  for (int i = 2048; i < 4096; i++) {
    assertTrue(trf.iter.hasTop());
    assertEquals(newKey("r0001", "cf1", "cq1", "", 1), trf.iter.getTopKey());
    assertEquals(newValue("" + i), trf.iter.getTopValue());
    trf.iter.next();
  }

  assertFalse(trf.iter.hasTop());

  trf.closeReader();
}
/** Column family name used by test18, zero padded to six digits (e.g. 7 -> "cf000007"). */
private String t18ncf(int i) {
  return String.format("cf%06d", i);
}
/** Builds the set of {@link ByteSequence} column families for the given test18 family ids. */
private Set<ByteSequence> t18newColFamByteSequence(int... colFams) {
  HashSet<ByteSequence> result = new HashSet<>();
  for (int id : colFams) {
    result.add(new ArrayByteSequence(t18ncf(id)));
  }
  return result;
}
/** Appends one key for test18 family {@code i} and records the family in {@code allCf}. */
private void t18Append(TestRFile trf, HashSet<ByteSequence> allCf, int i) throws IOException {
  String cf = t18ncf(i);
  trf.writer.append(newKey("r0000", cf, "cq1", "", 1), newValue("" + i));
  allCf.add(new ArrayByteSequence(cf));
}
/**
 * Seeks over the whole file twice — once including only {@code cfs}, once excluding them — and
 * verifies both the exact set of column families seen and the number of locality groups the
 * reader had to seek each time.
 *
 * @param cfs column families to include (first seek) / exclude (second seek)
 * @param allCf every column family written to the file
 * @param eialg expected locality groups seeked for the inclusive seek
 * @param eealg expected locality groups seeked for the exclusive seek
 */
private void t18Verify(Set<ByteSequence> cfs, SortedKeyValueIterator<Key,Value> iter,
    Reader reader, HashSet<ByteSequence> allCf, int eialg, int eealg) throws IOException {

  HashSet<ByteSequence> colFamsSeen = new HashSet<>();

  iter.seek(new Range(), cfs, true);
  assertEquals(eialg, reader.getNumLocalityGroupsSeeked());

  while (iter.hasTop()) {
    colFamsSeen.add(iter.getTopKey().getColumnFamilyData());
    iter.next();
  }

  // inclusive seek should see exactly the requested families that exist in the file
  HashSet<ByteSequence> expected = new HashSet<>(allCf);
  expected.retainAll(cfs);

  assertEquals(expected, colFamsSeen);

  iter.seek(new Range(), cfs, false);
  assertEquals(eealg, reader.getNumLocalityGroupsSeeked());

  colFamsSeen.clear();

  while (iter.hasTop()) {
    colFamsSeen.add(iter.getTopKey().getColumnFamilyData());
    iter.next();
  }

  // exclusive seek should see everything except the requested families
  HashSet<ByteSequence> nonExcluded = new HashSet<>(allCf);
  nonExcluded.removeAll(cfs);

  assertEquals(nonExcluded, colFamsSeen);
}
@Test
public void test18() throws IOException {
  // test writing more column families to default LG than it will track

  TestRFile trf = new TestRFile(conf);

  trf.openWriter(false);

  HashSet<ByteSequence> allCf = new HashSet<>();

  trf.writer.startNewLocalityGroup("lg1", t18newColFamByteSequence(0));
  for (int i = 0; i < 1; i++) {
    t18Append(trf, allCf, i);
  }

  trf.writer.startNewLocalityGroup("lg2", t18newColFamByteSequence(1, 2));
  for (int i = 1; i < 3; i++) {
    t18Append(trf, allCf, i);
  }

  trf.writer.startNewLocalityGroup("lg3", t18newColFamByteSequence(3, 4, 5));
  for (int i = 3; i < 6; i++) {
    t18Append(trf, allCf, i);
  }

  trf.writer.startDefaultLocalityGroup();

  // write more families to the default group than MAX_CF_IN_DLG, so the
  // default group stops tracking its family set
  int max = 6 + RFile.Writer.MAX_CF_IN_DLG + 100;
  for (int i = 6; i < max; i++) {
    t18Append(trf, allCf, i);
  }

  trf.closeWriter();

  trf.openReader();

  // expected locality-groups-seeked counts: the untracked default group must
  // always be seeked (both when including and when excluding families)
  t18Verify(t18newColFamByteSequence(0), trf.iter, trf.reader, allCf, 1, 3);
  for (int i = 1; i < 10; i++) {
    t18Verify(t18newColFamByteSequence(i), trf.iter, trf.reader, allCf, 1, 4);
  }

  // a family that was never written still forces a seek of the default group
  t18Verify(t18newColFamByteSequence(max + 1), trf.iter, trf.reader, allCf, 1, 4);

  t18Verify(t18newColFamByteSequence(1, 2, 3, 4), trf.iter, trf.reader, allCf, 2, 3);
  t18Verify(t18newColFamByteSequence(1, 2, 3, 4, 5), trf.iter, trf.reader, allCf, 2, 2);

  t18Verify(t18newColFamByteSequence(0, 1, 2, 3, 4), trf.iter, trf.reader, allCf, 3, 2);
  t18Verify(t18newColFamByteSequence(0, 1, 2, 3, 4, 5), trf.iter, trf.reader, allCf, 3, 1);
  t18Verify(t18newColFamByteSequence(0, 1, 2, 3, 4, 5, 6), trf.iter, trf.reader, allCf, 4, 1);

  t18Verify(t18newColFamByteSequence(0, 1), trf.iter, trf.reader, allCf, 2, 3);
  t18Verify(t18newColFamByteSequence(2, 3), trf.iter, trf.reader, allCf, 2, 4);
  t18Verify(t18newColFamByteSequence(5, 6), trf.iter, trf.reader, allCf, 2, 4);

  trf.closeReader();
}
@Test
public void test19() throws IOException {
  // Tests the RFile metastore: a named metadata stream written alongside the
  // data must be readable in the middle of a scan without disturbing iteration.
  TestRFile trf = new TestRFile(conf);

  // openWriter(false) was previously called twice in a row here; the duplicate
  // call leaked the first writer and its underlying stream, so it was removed
  trf.openWriter(false);

  trf.writer.startNewLocalityGroup("lg1", newColFamByteSequence("cf1", "cf2"));
  trf.writer.append(newKey("0000", "cf1", "doe,john", "", 4), newValue("1123 West Left st"));
  trf.writer.append(newKey("0002", "cf2", "doe,jane", "", 5), newValue("1124 East Right st"));
  trf.writer.startNewLocalityGroup("lg2", newColFamByteSequence("cf3", "cf4"));

  DataOutputStream dos = trf.writer.createMetaStore("count");

  dos.writeInt(2);
  dos.writeUTF("data1");
  dos.writeInt(1);
  dos.writeUTF("data2");
  dos.writeInt(1);

  dos.close();

  trf.closeWriter();

  trf.openReader();

  trf.iter.seek(new Range(), EMPTY_COL_FAMS, false);

  assertTrue(trf.iter.hasTop());
  // expected value first per JUnit convention (arguments were reversed before)
  assertEquals(newKey("0000", "cf1", "doe,john", "", 4), trf.iter.getTopKey());
  assertEquals(newValue("1123 West Left st"), trf.iter.getTopValue());
  trf.iter.next();

  // read the metastore back while the scan is in progress
  DataInputStream in = trf.reader.getMetaStore("count");

  assertEquals(2, in.readInt());
  assertEquals("data1", in.readUTF());
  assertEquals(1, in.readInt());
  assertEquals("data2", in.readUTF());
  assertEquals(1, in.readInt());

  in.close();

  // iteration resumes exactly where it left off
  assertTrue(trf.iter.hasTop());
  assertEquals(newKey("0002", "cf2", "doe,jane", "", 5), trf.iter.getTopKey());
  assertEquals(newValue("1124 East Right st"), trf.iter.getTopValue());
  trf.iter.next();
  assertFalse(trf.iter.hasTop());

  trf.closeReader();
}
@Test
public void testReseekUnconsumed() throws Exception {
  // Partially consumes a seeked range, then reseeks slightly forward within the
  // unconsumed portion — exercising the reseek optimization — and verifies the
  // correct keys are still returned.
  TestRFile trf = new TestRFile(conf);

  trf.openWriter();

  for (int i = 0; i < 2500; i++) {
    trf.writer.append(newKey(formatString("r_", i), "cf1", "cq1", "L1", 42), newValue("foo" + i));
  }

  trf.closeWriter();
  trf.openReader();

  Set<ByteSequence> cfs = Collections.emptySet();

  for (int count = 0; count < 100; count++) {
    // seek a random 100-row range, consume a random prefix of it
    int start = RANDOM.get().nextInt(2300);
    Range range = new Range(newKey(formatString("r_", start), "cf1", "cq1", "L1", 42),
        newKey(formatString("r_", start + 100), "cf1", "cq1", "L1", 42));

    trf.reader.seek(range, cfs, false);

    int numToScan = RANDOM.get().nextInt(100);

    for (int j = 0; j < numToScan; j++) {
      assertTrue(trf.reader.hasTop());
      assertEquals(newKey(formatString("r_", start + j), "cf1", "cq1", "L1", 42),
          trf.reader.getTopKey());
      trf.reader.next();
    }

    assertTrue(trf.reader.hasTop());
    assertEquals(newKey(formatString("r_", start + numToScan), "cf1", "cq1", "L1", 42),
        trf.reader.getTopKey());

    // seek a little forward from the last range and read a few keys within the unconsumed portion
    // of the last range

    int start2 = start + numToScan + RANDOM.get().nextInt(3);
    int end2 = start2 + RANDOM.get().nextInt(3);

    range = new Range(newKey(formatString("r_", start2), "cf1", "cq1", "L1", 42),
        newKey(formatString("r_", end2), "cf1", "cq1", "L1", 42));

    trf.reader.seek(range, cfs, false);

    for (int j = start2; j <= end2; j++) {
      assertTrue(trf.reader.hasTop());
      assertEquals(newKey(formatString("r_", j), "cf1", "cq1", "L1", 42), trf.reader.getTopKey());
      trf.reader.next();
    }

    assertFalse(trf.reader.hasTop());
  }

  trf.closeReader();
}
@Test
public void testMissingUnreleasedVersions() {
  // runVersionTest(5, ...) dereferences the classpath stream for ver_5.rf;
  // presumably no such resource is packaged (v5 unreleased), so the stream is
  // null and the read NPEs — TODO confirm resource layout if this ever changes
  assertThrows(NullPointerException.class,
      () -> runVersionTest(5, getAccumuloConfig(ConfigMode.CRYPTO_OFF)));
}
@Test
public void testOldVersions() throws Exception {
  // verify rfiles written in older format versions remain readable without crypto;
  // version 5 is deliberately absent (see testMissingUnreleasedVersions)
  ConfigurationCopy defaultConf = getAccumuloConfig(ConfigMode.CRYPTO_OFF);
  runVersionTest(3, defaultConf);
  runVersionTest(4, defaultConf);
  runVersionTest(6, defaultConf);
  runVersionTest(7, defaultConf);
}
@Test
public void testOldVersionsWithCrypto() throws Exception {
  // same old-version readability check, but with table crypto configured on;
  // old files written without crypto must still be readable
  ConfigurationCopy cryptoOnConf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  runVersionTest(3, cryptoOnConf);
  runVersionTest(4, cryptoOnConf);
  runVersionTest(6, cryptoOnConf);
  runVersionTest(7, cryptoOnConf);
}
/**
 * Reads a canned rfile of the given format version from the test resources and verifies its
 * index and contents, scanning both with and without column family filtering.
 *
 * <p>
 * Intentionally throws {@link NullPointerException} when no {@code ver_<version>.rf} resource
 * exists on the classpath — testMissingUnreleasedVersions depends on that behavior.
 */
private void runVersionTest(int version, ConfigurationCopy aconf) throws Exception {
  byte[] data;
  // try-with-resources: previously the resource stream was never closed; a null
  // stream still NPEs at in.read (preserved), and close() is skipped for null
  try (InputStream in = this.getClass().getClassLoader()
      .getResourceAsStream("org/apache/accumulo/core/file/rfile/ver_" + version + ".rf")) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    byte[] buf = new byte[1024];
    int read;
    while ((read = in.read(buf)) > 0) {
      baos.write(buf, 0, read);
    }
    data = baos.toByteArray();
  }
  SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
  FSDataInputStream in2 = new FSDataInputStream(bais);
  // configure block caching so the reader exercises the cache path
  aconf.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
  aconf.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(100000));
  aconf.set(Property.TSERV_DATACACHE_SIZE, Long.toString(100000000));
  aconf.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(100000000));
  BlockCacheManager manager = BlockCacheManagerFactory.getInstance(aconf);
  manager.start(BlockCacheConfiguration.forTabletServer(aconf));
  CryptoService cs = CryptoFactoryLoader.getServiceForClient(CryptoEnvironment.Scope.TABLE,
      aconf.getAllCryptoProperties());
  CachableBuilder cb = new CachableBuilder().input(in2, "cache-1").length(data.length)
      .conf(hadoopConf).cryptoService(cs).cacheProvider(new BasicCacheProvider(
          manager.getBlockCache(CacheType.INDEX), manager.getBlockCache(CacheType.DATA)));
  Reader reader = new RFile.Reader(cb);
  checkIndex(reader);
  ColumnFamilySkippingIterator iter = new ColumnFamilySkippingIterator(reader);
  for (int start : new int[] {0, 10, 100, 998}) {
    // inclusive scans: one column family at a time, from `start` to the end
    for (int cf = 1; cf <= 4; cf++) {
      if (start == 0) {
        iter.seek(new Range(), newColFamByteSequence(formatString("cf_", cf)), true);
      } else {
        iter.seek(new Range(formatString("r_", start), null),
            newColFamByteSequence(formatString("cf_", cf)), true);
      }
      for (int i = start; i < 1000; i++) {
        assertTrue(iter.hasTop());
        assertEquals(newKey(formatString("r_", i), formatString("cf_", cf),
            formatString("cq_", 0), "", 1000 - i), iter.getTopKey());
        assertEquals(newValue(i + ""), iter.getTopValue());
        iter.next();
      }
      assertFalse(iter.hasTop());
    }
    // unfiltered scan: expect all four column families for each row
    if (start == 0) {
      iter.seek(new Range(), newColFamByteSequence(), false);
    } else {
      iter.seek(new Range(formatString("r_", start), null), newColFamByteSequence(), false);
    }
    for (int i = start; i < 1000; i++) {
      for (int cf = 1; cf <= 4; cf++) {
        assertTrue(iter.hasTop());
        assertEquals(newKey(formatString("r_", i), formatString("cf_", cf),
            formatString("cq_", 0), "", 1000 - i), iter.getTopKey());
        assertEquals(newValue(i + ""), iter.getTopValue());
        iter.next();
      }
    }
    assertFalse(iter.hasTop());
  }
  manager.stop();
  reader.close();
}
// convenience delegate: builds a test config for the given crypto mode,
// scoped to this test class
private ConfigurationCopy getAccumuloConfig(ConfigMode configMode) {
  return CryptoTest.getAccumuloConfig(configMode, getClass());
}
@Test
public void testEncRFile1() throws Exception {
  // re-run test1() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test1();
  conf = null;
}
@Test
public void testEncRFile2() throws Exception {
  // re-run test2() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test2();
  conf = null;
}
@Test
public void testEncRFile3() throws Exception {
  // re-run test3() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test3();
  conf = null;
}
@Test
public void testEncRFile4() throws Exception {
  // re-run test4() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test4();
  conf = null;
}
@Test
public void testEncRFile5() throws Exception {
  // re-run test5() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test5();
  conf = null;
}
@Test
public void testEncRFile6() throws Exception {
  // re-run test6() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test6();
  conf = null;
}
@Test
public void testEncRFile7() throws Exception {
  // re-run test7() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test7();
  conf = null;
}
@Test
public void testEncRFile8() throws Exception {
  // re-run test8() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test8();
  conf = null;
}
@Test
public void testEncRFile9() throws Exception {
  // re-run test9() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test9();
  conf = null;
}
@Test
public void testEncRFile10() throws Exception {
  // re-run test10() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test10();
  conf = null;
}
@Test
public void testEncRFile11() throws Exception {
  // re-run test11() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test11();
  conf = null;
}
@Test
public void testEncRFile12() throws Exception {
  // re-run test12() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test12();
  conf = null;
}
@Test
public void testEncRFile13() throws Exception {
  // re-run test13() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test13();
  conf = null;
}
@Test
public void testEncRFile14() throws Exception {
  // re-run test14() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test14();
  conf = null;
}
@Test
public void testEncRFile16() throws Exception {
  // re-run test16() with table crypto enabled; reset the shared conf afterwards
  // for consistency with the other testEncRFileN methods (this one omitted it)
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test16();
  conf = null;
}
@Test
public void testEncRFile17() throws Exception {
  // re-run test17() with table crypto enabled; reset the shared conf afterwards
  // for consistency with the other testEncRFileN methods (this one omitted it)
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test17();
  conf = null;
}
@Test
public void testEncRFile18() throws Exception {
  // re-run test18() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test18();
  conf = null;
}
@Test
public void testEncRFile19() throws Exception {
  // re-run test19() with table crypto enabled, then reset the shared conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test19();
  conf = null;
}
@Test
public void testEncryptedRFiles() throws Exception {
  // run tests 1-8 back-to-back under one crypto-enabled config, exercising
  // multiple encrypted files within a single test instance
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  test1();
  test2();
  test3();
  test4();
  test5();
  test6();
  test7();
  test8();
  conf = null;
}
// Builds the synthetic "user" key for row r: column index 0 -> addr, 1 -> name.
// Any other column index is a programming error.
private Key newKey(int r, int c) {
  String row = String.format("r%06d", r);
  if (c == 0) {
    return new Key(row, "user", "addr");
  }
  if (c == 1) {
    return new Key(row, "user", "name");
  }
  throw new IllegalArgumentException();
}
// Builds the value paired with newKey(r, c): column index 0 -> address string,
// 1 -> name string. Any other column index is a programming error.
private Value newValue(int r, int c) {
  if (c == 0) {
    return new Value("123" + r + " west st");
  }
  if (c == 1) {
    return new Value("bob" + r);
  }
  throw new IllegalArgumentException();
}
// Folds every component of the key/value pair into the digest. The sequence of
// puts defines the resulting hash, so it must stay in this exact order.
private static void hash(Hasher sink, Key k, Value v) {
  sink.putBytes(k.getRowData().toArray());
  sink.putBytes(k.getColumnFamilyData().toArray());
  sink.putBytes(k.getColumnQualifierData().toArray());
  sink.putBytes(k.getColumnVisibilityData().toArray());
  sink.putLong(k.getTimestamp());
  sink.putBoolean(k.isDeleted());
  sink.putBytes(v.get());
}
// Writes one key/value to the rfile while bookkeeping for later verification:
// it is recorded in `sampled` when the sampler selects it, and always folded
// into the running digest before being appended.
private static void add(TestRFile dest, Key k, Value v, Hasher digest,
    List<Entry<Key,Value>> sampled, Sampler selector) throws IOException {
  if (selector.accept(k)) {
    sampled.add(new AbstractMap.SimpleImmutableEntry<>(k, v));
  }
  hash(digest, k, v);
  dest.writer.append(k, v);
}
// Drains the iterator into a list, defensively copying each key and value
// because the iterator may reuse its top objects between calls to next().
private List<Entry<Key,Value>> toList(SortedKeyValueIterator<Key,Value> sample)
    throws IOException {
  ArrayList<Entry<Key,Value>> result = new ArrayList<>();
  for (; sample.hasTop(); sample.next()) {
    result.add(new AbstractMap.SimpleImmutableEntry<>(new Key(sample.getTopKey()),
        new Value(sample.getTopValue())));
  }
  return result;
}
// convenience overload: check the sample with no column family filtering
private void checkSample(SortedKeyValueIterator<Key,Value> sample,
    List<Entry<Key,Value>> sampleData) throws IOException {
  checkSample(sample, sampleData, EMPTY_COL_FAMS, false);
}
// Verifies that `sample` yields exactly `sampleData`: first over the full
// range, then over 33 randomly chosen sub-ranges, tracking the expected
// sublist bounds (startIndex/endIndex) alongside each random seek.
private void checkSample(SortedKeyValueIterator<Key,Value> sample,
    List<Entry<Key,Value>> sampleData, Collection<ByteSequence> columnFamilies, boolean inclusive)
    throws IOException {
  sample.seek(new Range(), columnFamilies, inclusive);
  assertEquals(sampleData, toList(sample));
  // randomly seek sample iterator and verify
  for (int i = 0; i < 33; i++) {
    Key startKey = null;
    boolean startInclusive = false;
    int startIndex = 0;
    Key endKey = null;
    boolean endInclusive = false;
    int endIndex = sampleData.size();
    if (RANDOM.get().nextBoolean()) {
      // random start point; an exclusive start bumps the expected sublist
      // past the chosen key
      startIndex = RANDOM.get().nextInt(sampleData.size());
      startKey = sampleData.get(startIndex).getKey();
      startInclusive = RANDOM.get().nextBoolean();
      if (!startInclusive) {
        startIndex++;
      }
    }
    if (startIndex < endIndex && RANDOM.get().nextBoolean()) {
      // random end point at or after the start; an exclusive end trims the
      // expected sublist before the chosen key
      endIndex -= RANDOM.get().nextInt(endIndex - startIndex);
      endKey = sampleData.get(endIndex - 1).getKey();
      endInclusive = RANDOM.get().nextBoolean();
      if (!endInclusive) {
        endIndex--;
      }
    } else if (startIndex == endIndex) {
      endInclusive = RANDOM.get().nextBoolean();
    }
    sample.seek(new Range(startKey, startInclusive, endKey, endInclusive), columnFamilies,
        inclusive);
    assertEquals(sampleData.subList(startIndex, endIndex), toList(sample));
  }
}
@Test
public void testSample() throws IOException {
  // Writes `num` rows (two entries each) while recording which entries the
  // RowSampler selects, then verifies the reader's sample view matches that
  // record, the full scan still sees everything, and deep copies of both the
  // sample and full iterators behave the same.
  int num = 10000;
  for (int sampleBufferSize : new int[] {1 << 10, 1 << 20}) {
    // force sample buffer to flush for smaller data
    RFile.setSampleBufferSize(sampleBufferSize);
    for (int modulus : new int[] {19, 103, 1019}) {
      Hasher dataHasher = Hashing.sha512().newHasher();
      List<Entry<Key,Value>> sampleData = new ArrayList<>();
      ConfigurationCopy sampleConf =
          new ConfigurationCopy(conf == null ? DefaultConfiguration.getInstance() : conf);
      sampleConf.set(Property.TABLE_SAMPLER, RowSampler.class.getName());
      sampleConf.set(Property.TABLE_SAMPLER_OPTS + "hasher", "murmur3_32");
      sampleConf.set(Property.TABLE_SAMPLER_OPTS + "modulus", modulus + "");
      Sampler sampler = SamplerFactory
          .newSampler(SamplerConfigurationImpl.newSamplerConfig(sampleConf), sampleConf);
      TestRFile trf = new TestRFile(sampleConf);
      trf.openWriter();
      for (int i = 0; i < num; i++) {
        add(trf, newKey(i, 0), newValue(i, 0), dataHasher, sampleData, sampler);
        add(trf, newKey(i, 1), newValue(i, 1), dataHasher, sampleData, sampler);
      }
      HashCode expectedDataHash = dataHasher.hash();
      trf.closeWriter();
      trf.openReader();
      FileSKVIterator sample =
          trf.reader.getSample(SamplerConfigurationImpl.newSamplerConfig(sampleConf));
      checkSample(sample, sampleData);
      // the full (non-sample) scan must still see every entry written
      assertEquals(expectedDataHash, hash(trf.reader));
      SampleIE ie = new SampleIE(
          SamplerConfigurationImpl.newSamplerConfig(sampleConf).toSamplerConfiguration());
      for (int i = 0; i < 3; i++) {
        // test opening and closing deep copies a few times.
        trf.reader.closeDeepCopies();
        sample = trf.reader.getSample(SamplerConfigurationImpl.newSamplerConfig(sampleConf));
        SortedKeyValueIterator<Key,Value> sampleDC1 = sample.deepCopy(ie);
        SortedKeyValueIterator<Key,Value> sampleDC2 = sample.deepCopy(ie);
        SortedKeyValueIterator<Key,Value> sampleDC3 = trf.reader.deepCopy(ie);
        // deep copying with a null sampler config should expose the full data
        SortedKeyValueIterator<Key,Value> allDC1 = sampleDC1.deepCopy(new SampleIE(null));
        SortedKeyValueIterator<Key,Value> allDC2 = sample.deepCopy(new SampleIE(null));
        assertEquals(expectedDataHash, hash(allDC1));
        assertEquals(expectedDataHash, hash(allDC2));
        checkSample(sample, sampleData);
        checkSample(sampleDC1, sampleData);
        checkSample(sampleDC2, sampleData);
        checkSample(sampleDC3, sampleData);
      }
      trf.reader.closeDeepCopies();
      trf.closeReader();
    }
  }
}
// Digests every key/value visible from a full-range, unfiltered scan of the
// iterator, using the same per-entry folding as hash(Hasher, Key, Value).
private HashCode hash(SortedKeyValueIterator<Key,Value> iter) throws IOException {
  Hasher digest = Hashing.sha512().newHasher();
  iter.seek(new Range(), EMPTY_COL_FAMS, false);
  for (; iter.hasTop(); iter.next()) {
    hash(digest, iter.getTopKey(), iter.getTopValue());
  }
  return digest.hash();
}
@Test
public void testSampleLG() throws IOException {
  // Same sampling verification as testSample, but across two locality groups:
  // a "meta-lg" group (metaA/metaB families) and the default group (dataA),
  // checking the sample view both per-group and combined.
  int num = 5000;
  for (int sampleBufferSize : new int[] {1 << 10, 1 << 20}) {
    // force sample buffer to flush for smaller data
    RFile.setSampleBufferSize(sampleBufferSize);
    for (int modulus : new int[] {19, 103, 1019}) {
      List<Entry<Key,Value>> sampleDataLG1 = new ArrayList<>();
      List<Entry<Key,Value>> sampleDataLG2 = new ArrayList<>();
      ConfigurationCopy sampleConf =
          new ConfigurationCopy(conf == null ? DefaultConfiguration.getInstance() : conf);
      sampleConf.set(Property.TABLE_SAMPLER, RowSampler.class.getName());
      sampleConf.set(Property.TABLE_SAMPLER_OPTS + "hasher", "murmur3_32");
      sampleConf.set(Property.TABLE_SAMPLER_OPTS + "modulus", modulus + "");
      Sampler sampler = SamplerFactory
          .newSampler(SamplerConfigurationImpl.newSamplerConfig(sampleConf), sampleConf);
      TestRFile trf = new TestRFile(sampleConf);
      trf.openWriter(false, 1000);
      trf.writer.startNewLocalityGroup("meta-lg", newColFamByteSequence("metaA", "metaB"));
      for (int r = 0; r < num; r++) {
        String row = String.format("r%06d", r);
        Key k1 = new Key(row, "metaA", "q9", 7);
        Key k2 = new Key(row, "metaB", "q8", 7);
        Key k3 = new Key(row, "metaB", "qA", 7);
        Value v1 = new Value("" + r);
        Value v2 = new Value("" + r * 93);
        Value v3 = new Value("" + r * 113);
        // RowSampler selects by row, so accepting k1 implies k2/k3 are sampled too
        if (sampler.accept(k1)) {
          sampleDataLG1.add(new AbstractMap.SimpleImmutableEntry<>(k1, v1));
          sampleDataLG1.add(new AbstractMap.SimpleImmutableEntry<>(k2, v2));
          sampleDataLG1.add(new AbstractMap.SimpleImmutableEntry<>(k3, v3));
        }
        trf.writer.append(k1, v1);
        trf.writer.append(k2, v2);
        trf.writer.append(k3, v3);
      }
      trf.writer.startDefaultLocalityGroup();
      for (int r = 0; r < num; r++) {
        String row = String.format("r%06d", r);
        Key k1 = new Key(row, "dataA", "q9", 7);
        Value v1 = new Value("" + r);
        if (sampler.accept(k1)) {
          sampleDataLG2.add(new AbstractMap.SimpleImmutableEntry<>(k1, v1));
        }
        trf.writer.append(k1, v1);
      }
      trf.closeWriter();
      assertTrue(!sampleDataLG1.isEmpty());
      assertTrue(!sampleDataLG2.isEmpty());
      trf.openReader(false);
      FileSKVIterator sample =
          trf.reader.getSample(SamplerConfigurationImpl.newSamplerConfig(sampleConf));
      // per-group checks: inclusive filters select a group, exclusive filters
      // select the complement
      checkSample(sample, sampleDataLG1, newColFamByteSequence("metaA", "metaB"), true);
      checkSample(sample, sampleDataLG1, newColFamByteSequence("metaA"), true);
      checkSample(sample, sampleDataLG1, newColFamByteSequence("metaB"), true);
      checkSample(sample, sampleDataLG1, newColFamByteSequence("dataA"), false);
      checkSample(sample, sampleDataLG2, newColFamByteSequence("metaA", "metaB"), false);
      checkSample(sample, sampleDataLG2, newColFamByteSequence("dataA"), true);
      // combined check: both groups merged in key order
      ArrayList<Entry<Key,Value>> allSampleData = new ArrayList<>();
      allSampleData.addAll(sampleDataLG1);
      allSampleData.addAll(sampleDataLG2);
      allSampleData.sort(Comparator.comparing(Entry::getKey));
      checkSample(sample, allSampleData, newColFamByteSequence("dataA", "metaA"), true);
      checkSample(sample, allSampleData, EMPTY_COL_FAMS, false);
      trf.closeReader();
    }
  }
}
@Test
public void testEncSample() throws IOException {
  // re-run both sampling tests with table crypto enabled, then reset conf
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  testSample();
  testSampleLG();
  conf = null;
}
@Test
public void testBigKeys() throws IOException {
  // this test ensures that big keys do not end up in the index
  ArrayList<Key> keys = new ArrayList<>();
  for (int i = 0; i < 1000; i++) {
    String row = String.format("r%06d", i);
    keys.add(new Key(row, "cf1", "cq1", 42));
  }
  // add a few keys with long rows
  for (int i = 0; i < 1000; i += 100) {
    String row = String.format("r%06d", i);
    char[] ca = new char[1000];
    Arrays.fill(ca, 'b');
    row = row + new String(ca);
    keys.add(new Key(row, "cf1", "cq1", 42));
  }
  Collections.sort(keys);
  TestRFile trf = new TestRFile(conf);
  trf.openWriter();
  for (Key k : keys) {
    trf.writer.append(k, new Value(k.hashCode() + ""));
  }
  // use closeWriter() like every other test so underlying streams are released,
  // not just the writer itself
  trf.closeWriter();
  trf.openReader();
  // no index key should contain one of the 1000+ byte rows
  FileSKVIterator iiter = trf.reader.getIndex();
  while (iiter.hasTop()) {
    Key k = iiter.getTopKey();
    assertTrue(k.getSize() < 20, k + " " + k.getSize() + " >= 20");
    iiter.next();
  }
  // every key, big or small, must still be seekable
  Collections.shuffle(keys);
  for (Key key : keys) {
    trf.reader.seek(new Range(key, null), EMPTY_COL_FAMS, false);
    assertTrue(trf.reader.hasTop());
    assertEquals(key, trf.reader.getTopKey());
    assertEquals(new Value(key.hashCode() + ""), trf.reader.getTopValue());
  }
  // the original version leaked the reader; close it like the sibling tests do
  trf.closeReader();
}
@Test
public void testCryptoDoesntLeakSensitive() throws IOException {
  // Writes an (empty) encrypted rfile and checks that no sensitive property
  // key appears verbatim in the raw output bytes.
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  // test an empty file
  TestRFile trf = new TestRFile(conf);
  trf.openWriter();
  trf.closeWriter();
  byte[] rfBytes = trf.baos.toByteArray();
  // If we get here, we have encrypted bytes
  for (Property prop : Property.values()) {
    if (prop.isSensitive()) {
      // property keys are ASCII, so default-charset getBytes() is safe here
      byte[] toCheck = prop.getKey().getBytes();
      assertEquals(-1, Bytes.indexOf(rfBytes, toCheck));
    }
  }
  // reset the shared config like every other crypto-enabled test (was missing)
  conf = null;
}
@Test
public void testRootTabletEncryption() throws Exception {
  // This tests that the normal set of operations used to populate a root tablet
  conf = getAccumuloConfig(ConfigMode.CRYPTO_TABLE_ON);
  TestRFile testRfile = new TestRFile(conf);
  testRfile.openWriter();
  RFile.Writer mfw = testRfile.writer;
  // mfw.startDefaultLocalityGroup();
  Text tableExtent = new Text(
      TabletsSection.encodeRow(MetadataTable.ID, TabletsSection.getRange().getEndKey().getRow()));
  // table tablet's directory
  Key tableDirKey = new Key(tableExtent, ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
      ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
  mfw.append(tableDirKey, new Value(/* TABLE_TABLETS_TABLET_DIR */"/table_info"));
  // table tablet time
  Key tableTimeKey = new Key(tableExtent, ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
      ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
  mfw.append(tableTimeKey, new Value(/* TabletTime.LOGICAL_TIME_ID */'L' + "0"));
  // table tablet's prevRow
  Key tablePrevRowKey = new Key(tableExtent, TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
      TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
  mfw.append(tablePrevRowKey, TabletColumnFamily.encodePrevEndRow(null));
  // ----------] default tablet info
  Text defaultExtent = new Text(TabletsSection.encodeRow(MetadataTable.ID, null));
  // default's directory
  Key defaultDirKey =
      new Key(defaultExtent, ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
          ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
  mfw.append(defaultDirKey, new Value(ServerColumnFamily.DEFAULT_TABLET_DIR_NAME));
  // default's time
  Key defaultTimeKey = new Key(defaultExtent, ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
      ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
  mfw.append(defaultTimeKey, new Value(/* TabletTime.LOGICAL_TIME_ID */'L' + "0"));
  // default's prevRow
  Key defaultPrevRowKey =
      new Key(defaultExtent, TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
          TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
  mfw.append(defaultPrevRowKey,
      TabletColumnFamily.encodePrevEndRow(TabletsSection.getRange().getEndKey().getRow()));
  testRfile.closeWriter();
  // dump the encrypted bytes to a file for inspection; try-with-resources
  // replaces the previous `if (true)` wrapper and guarantees the stream closes
  // even if the write fails
  try (FileOutputStream fileOutputStream =
      new FileOutputStream(new File(tempDir, "testEncryptedRootFile.rf"))) {
    fileOutputStream.write(testRfile.baos.toByteArray());
    fileOutputStream.flush();
  }
  testRfile.openReader();
  testRfile.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
  assertTrue(testRfile.iter.hasTop());
  assertNotNull(testRfile.reader.getLastRow());
  testRfile.closeReader();
  conf = null;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import com.google.common.util.concurrent.AtomicLongMap;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
public class RFileMetricsTest {
private TestRFile trf = null;
// give every test a fresh rfile wrapper
@BeforeEach
public void makeTestRFile() {
  trf = new TestRFile();
}
// Best-effort teardown: close whatever the test left open, then drop the
// reference. Close failures are ignored so cleanup never masks a test result.
@AfterEach
public void cleanUpTestRFile() {
  if (trf == null) {
    return;
  }
  if (trf.writer != null) {
    try {
      trf.closeWriter();
    } catch (IOException ignored) {
      // cleanup only; deliberately swallowed
    }
  }
  if (trf.reader != null) {
    try {
      trf.closeReader();
    } catch (IOException ignored) {
      // cleanup only; deliberately swallowed
    }
  }
  trf = null;
}
public static class TestRFile extends RFileTest.TestRFile {
  public TestRFile() {
    // no configuration: exercise the defaults
    super(null);
  }
  /**
   * Registers a visibility-metrics gatherer on the reader, then scans every locality group end
   * to end so the gatherer observes all entries, and returns it for inspection.
   */
  public VisMetricsGatherer gatherMetrics() throws IOException {
    VisMetricsGatherer gatherer = new VisMetricsGatherer();
    reader.registerMetrics(gatherer);
    Map<String,ArrayList<ByteSequence>> lgColumnFamilies = reader.getLocalityGroupCF();
    for (Map.Entry<String,ArrayList<ByteSequence>> lg : lgColumnFamilies.entrySet()) {
      LocalityGroupUtil.seek(reader, new Range(), lg.getKey(), lgColumnFamilies);
      while (reader.hasTop()) {
        reader.next();
      }
    }
    return gatherer;
  }
}
@Test
public void emptyFile() throws IOException {
  // test an empty file
  trf.openWriter();
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  Map<String,AtomicLongMap<String>> metrics = vmg.metric;
  Map<String,AtomicLongMap<String>> blocks = vmg.blocks;
  // with nothing written, the gatherer should record no locality groups at all
  assertEquals(0, metrics.size());
  assertEquals(0, blocks.size());
  trf.closeReader();
}
@Test
public void oneEntryDefaultLocGroup() throws IOException {
  // test an rfile with one entry in the default locality group
  trf.openWriter();
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // the default locality group is keyed by null in the gatherer's maps
  AtomicLongMap<String> metrics = vmg.metric.get(null);
  AtomicLongMap<String> blocks = vmg.blocks.get(null);
  // one entry and one block counted for visibility "L1"
  assertEquals(1, metrics.get("L1"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, vmg.numEntries.get(vmg.localityGroups.indexOf(null)).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf(null)).longValue());
  trf.closeReader();
}
@Test
public void twoEntriesDefaultLocGroup() throws IOException {
  // test an rfile with two entries in the default locality group
  trf.openWriter();
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L2", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // default locality group is keyed by null
  AtomicLongMap<String> metrics = vmg.metric.get(null);
  AtomicLongMap<String> blocks = vmg.blocks.get(null);
  // one entry per visibility, both landing in the same single block
  assertEquals(1, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(2, vmg.numEntries.get(vmg.localityGroups.indexOf(null)).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf(null)).longValue());
  trf.closeReader();
}
@Test
public void oneEntryNonDefaultLocGroup() throws IOException {
  // test an rfile with one entry in a non-default locality group
  trf.openWriter(false);
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // named locality groups are keyed by their group name
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  assertEquals(1, metrics.get("L1"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  trf.closeReader();
}
@Test
public void twoEntryNonDefaultLocGroup() throws IOException {
  // test an rfile with two entries in a non-default locality group
  trf.openWriter(false);
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L2", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  // one entry per visibility, both within the group's single block
  assertEquals(1, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(2, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  trf.closeReader();
}
@Test
public void twoNonDefaultLocGroups() throws IOException {
  // test an rfile with two entries in 2 non-default locality groups
  trf.openWriter(false);
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L2", 55), RFileTest.newValue("foo"));
  Set<ByteSequence> lg2 = new HashSet<>();
  lg2.add(new ArrayByteSequence("cf2"));
  trf.writer.startNewLocalityGroup("lg2", lg2);
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "L2", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // each group is tracked independently under its own name
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  assertEquals(1, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(2, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  metrics = vmg.metric.get("lg2");
  blocks = vmg.blocks.get("lg2");
  assertEquals(1, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(2, vmg.numEntries.get(vmg.localityGroups.indexOf("lg2")).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg2")).longValue());
  trf.closeReader();
}
@Test
public void nonDefaultAndDefaultLocGroup() throws IOException {
  // test an rfile with 3 entries in a non-default locality group and the default locality group
  trf.openWriter(false);
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq2", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq2", "L2", 55), RFileTest.newValue("foo"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "A", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "B", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // named group: two L1 entries, one L2 entry, all in one block
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  assertEquals(2, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(3, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  // default group (null key): one entry per visibility A/B
  metrics = vmg.metric.get(null);
  blocks = vmg.blocks.get(null);
  assertEquals(1, metrics.get("A"));
  assertEquals(1, metrics.get("B"));
  assertEquals(1, blocks.get("A"));
  assertEquals(1, blocks.get("B"));
  assertEquals(2, vmg.numEntries.get(vmg.localityGroups.indexOf(null)).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf(null)).longValue());
  trf.closeReader();
}
@Test
public void multiCFNonDefaultAndDefaultLocGroup() throws IOException {
  // test an rfile with multiple column families in a non-default locality group and the default
  // locality group
  trf.openWriter(false);
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  lg1.add(new ArrayByteSequence("cf3"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq2", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq2", "L2", 55), RFileTest.newValue("foo"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "A", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "B", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf4", "cq1", "A", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf4", "cq1", "B", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // counts are per visibility label, regardless of column family
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  assertEquals(3, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(1, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(4, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  metrics = vmg.metric.get(null);
  blocks = vmg.blocks.get(null);
  assertEquals(2, metrics.get("A"));
  assertEquals(2, metrics.get("B"));
  assertEquals(1, blocks.get("A"));
  assertEquals(1, blocks.get("B"));
  assertEquals(4, vmg.numEntries.get(vmg.localityGroups.indexOf(null)).longValue());
  assertEquals(1, vmg.numBlocks.get(vmg.localityGroups.indexOf(null)).longValue());
  trf.closeReader();
}
@Test
public void multiBlockDefaultLocGroup() throws IOException {
  // test an rfile with four blocks in the default locality group
  trf.openWriter(20);// Each entry is a block
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq2", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq2", "L2", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  AtomicLongMap<String> metrics = vmg.metric.get(null);
  AtomicLongMap<String> blocks = vmg.blocks.get(null);
  // with one entry per block, block counts mirror entry counts per visibility
  assertEquals(3, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(3, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(4, vmg.numEntries.get(vmg.localityGroups.indexOf(null)).longValue());
  assertEquals(4, vmg.numBlocks.get(vmg.localityGroups.indexOf(null)).longValue());
  trf.closeReader();
}
@Test
public void multiBlockNonDefaultLocGroup() throws IOException {
  // test an rfile with four blocks in a non-default locality group
  trf.openWriter(false, 20);// Each entry is a block
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  lg1.add(new ArrayByteSequence("cf3"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq2", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq2", "L2", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  // with one entry per block, block counts mirror entry counts per visibility
  assertEquals(3, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(3, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(4, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(4, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  trf.closeReader();
}
@Test
public void multiBlockMultiCFNonDefaultAndDefaultLocGroup() throws IOException {
  // test an rfile with multiple column families and multiple blocks in a non-default locality
  // group and the default locality group
  trf.openWriter(false, 10);// Each entry is a block
  // "lg1" owns families cf1 and cf3; cf2 and cf4 fall through to the default group.
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence("cf1"));
  lg1.add(new ArrayByteSequence("cf3"));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf1", "cq2", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq1", "L1", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf3", "cq2", "L2", 55), RFileTest.newValue("foo"));
  trf.writer.startDefaultLocalityGroup();
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "A", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf2", "cq1", "B", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf4", "cq1", "A", 55), RFileTest.newValue("foo"));
  trf.writer.append(RFileTest.newKey("r1", "cf4", "cq1", "B", 55), RFileTest.newValue("foo"));
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // lg1: 4 entries across 4 blocks; 3 entries/blocks under visibility L1, 1 under L2.
  AtomicLongMap<String> metrics = vmg.metric.get("lg1");
  AtomicLongMap<String> blocks = vmg.blocks.get("lg1");
  assertEquals(3, metrics.get("L1"));
  assertEquals(1, metrics.get("L2"));
  assertEquals(3, blocks.get("L1"));
  assertEquals(1, blocks.get("L2"));
  assertEquals(4, vmg.numEntries.get(vmg.localityGroups.indexOf("lg1")).longValue());
  assertEquals(4, vmg.numBlocks.get(vmg.localityGroups.indexOf("lg1")).longValue());
  // Default group (keyed by null): 4 entries across 4 blocks, split evenly
  // between visibilities A and B.
  metrics = vmg.metric.get(null);
  blocks = vmg.blocks.get(null);
  assertEquals(2, metrics.get("A"));
  assertEquals(2, metrics.get("B"));
  assertEquals(2, blocks.get("A"));
  assertEquals(2, blocks.get("B"));
  assertEquals(4, vmg.numEntries.get(vmg.localityGroups.indexOf(null)).longValue());
  assertEquals(4, vmg.numBlocks.get(vmg.localityGroups.indexOf(null)).longValue());
  trf.closeReader();
}
@Test
public void testManyFamiliesInDefaultLocGroup() throws IOException {
  // Exercise visibility metrics when the default locality group contains many
  // distinct column families, alongside a named group with only two families.
  trf.openWriter(false, 1024);
  String fam1 = String.format("%06x", 9000);
  String fam2 = String.format("%06x", 9001);
  Set<ByteSequence> lg1 = new HashSet<>();
  lg1.add(new ArrayByteSequence(fam1));
  lg1.add(new ArrayByteSequence(fam2));
  trf.writer.startNewLocalityGroup("lg1", lg1);
  // lg1: 1100 rows, one entry per family per row (2200 entries total,
  // 1100 under "A" and 1100 under "A|B").
  for (int row = 0; row < 1100; row++) {
    String rs = String.format("%06x", row);
    trf.writer.append(new Key(rs, fam1, "q4", "A", 42L), new Value("v"));
    trf.writer.append(new Key(rs, fam2, "q4", "A|B", 42L), new Value("v"));
  }
  trf.writer.startDefaultLocalityGroup();
  // Default group: 1000 rows x 5 visibilities, each entry in a unique family.
  String[] vis = {"A", "A&B", "A|C", "B&C", "Boo"};
  int fam = 0;
  for (int row = 0; row < 1000; row++) {
    String rs = String.format("%06x", row);
    for (int v = 0; v < 5; v++) {
      String fs = String.format("%06x", fam++);
      trf.writer.append(new Key(rs, fs, "q4", vis[v], 42L), new Value("v"));
    }
  }
  trf.closeWriter();
  trf.openReader(false);
  VisMetricsGatherer vmg = trf.gatherMetrics();
  // Each visibility appears 1000 times in the default group, spread over 71 blocks.
  Map<String,Long> expected = new HashMap<>();
  Map<String,Long> expectedBlocks = new HashMap<>();
  for (String v : vis) {
    expected.put(v, 1000L);
    expectedBlocks.put(v, 71L);
  }
  assertEquals(expected, vmg.metric.get(null).asMap());
  assertEquals(expectedBlocks, vmg.blocks.get(null).asMap());
  // lg1: 1100 entries per visibility, spread over 32 blocks.
  expected.clear();
  expectedBlocks.clear();
  expected.put("A", 1100L);
  expected.put("A|B", 1100L);
  expectedBlocks.put("A", 32L);
  expectedBlocks.put("A|B", 32L);
  assertEquals(expected, vmg.metric.get("lg1").asMap());
  assertEquals(expectedBlocks, vmg.blocks.get("lg1").asMap());
  // Exactly two locality groups were observed: lg1 and the default group.
  assertEquals(2, vmg.metric.keySet().size());
  assertEquals(2, vmg.blocks.keySet().size());
  // Fix: release the reader, matching every other test in this class which
  // pairs openReader() with closeReader(); previously the reader leaked.
  trf.closeReader();
}
}
| 9,335 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.BufferedWriter;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader.IndexIterator;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Writer;
import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.junit.jupiter.api.Test;
/**
 * Tests RFile's {@link MultiLevelIndex} by writing {@code num} index entries with a bounded
 * block size (which forces multiple index levels for small block sizes), reading the index
 * back, and verifying forward/backward iteration and key lookups.
 */
public class MultiLevelIndexTest {
  private Configuration hadoopConf = new Configuration();

  @Test
  public void test1() throws Exception {
    // Vary the entry count against a fixed block size, plus one tiny-block case
    // that forces a deep index tree.
    runTest(500, 1);
    runTest(500, 10);
    runTest(500, 100);
    runTest(500, 1000);
    runTest(500, 10000);
    runTest(1, 100);
  }

  /**
   * Writes an index of {@code num} entries (keys "00000000", "00001000", ... with the entry's
   * ordinal stored as its numEntries value) into an in-memory BCFile, then reads it back and
   * checks iteration order in both directions plus random lookups.
   */
  private void runTest(int maxBlockSize, int num) throws IOException {
    AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
    // Write side: the whole file lives in this byte array buffer.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
    CryptoService cs = CryptoFactoryLoader.getServiceForServer(aconf);
    BCFile.Writer _cbw = new BCFile.Writer(dos, null, "gz", hadoopConf, cs);
    BufferedWriter mliw = new BufferedWriter(new Writer(_cbw, maxBlockSize));
    for (int i = 0; i < num; i++) {
      mliw.add(new Key(String.format("%05d000", i)), i, 0, 0, 0);
    }
    mliw.addLast(new Key(String.format("%05d000", num)), num, 0, 0, 0);
    // Serialize the index into a meta block named "root", then close everything.
    BCFile.Writer.BlockAppender root = _cbw.prepareMetaBlock("root");
    mliw.close(root);
    root.close();
    _cbw.close();
    dos.close();
    baos.close();

    // Read side: reopen the serialized bytes through a cachable block reader.
    byte[] data = baos.toByteArray();
    SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
    FSDataInputStream in = new FSDataInputStream(bais);
    CachableBuilder cb = new CachableBuilder().input(in, "source-1").length(data.length)
        .conf(hadoopConf).cryptoService(cs);
    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(cb);
    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_8);
    CachableBlockFile.CachedBlockRead rootIn = _cbr.getMetaBlock("root");
    reader.readFields(rootIn);
    rootIn.close();

    // Walk forward from before the first key; numEntries encodes each entry's ordinal.
    IndexIterator liter = reader.lookup(new Key("000000"));
    int count = 0;
    while (liter.hasNext()) {
      assertEquals(count, liter.nextIndex());
      assertEquals(count, liter.peek().getNumEntries());
      assertEquals(count, liter.next().getNumEntries());
      count++;
    }
    // num entries from add() plus the one from addLast().
    assertEquals(num + 1, count);
    // Walk all the way back again.
    while (liter.hasPrevious()) {
      count--;
      assertEquals(count, liter.previousIndex());
      assertEquals(count, liter.peekPrevious().getNumEntries());
      assertEquals(count, liter.previous().getNumEntries());
    }
    assertEquals(0, count);

    // go past the end
    liter = reader.lookup(new Key(String.format("%05d000", num + 1)));
    assertFalse(liter.hasNext());

    // Random lookups: entry i covers keys up to and including i*1000.
    RANDOM.get().ints(100, 0, num * 1_000).forEach(k -> {
      int expected;
      if (k % 1000 == 0) {
        expected = k / 1000; // end key is inclusive
      } else {
        expected = k / 1000 + 1;
      }
      try {
        IndexEntry ie = reader.lookup(new Key(String.format("%08d", k))).next();
        assertEquals(expected, ie.getNumEntries());
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    });
  }
}
| 9,336 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/FencedRFileTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.crypto.CryptoTest;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.rfile.RFile.FencedIndex;
import org.apache.accumulo.core.file.rfile.RFile.FencedReader;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.IterationInterruptedException;
import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
/**
 * Tests reading RFiles through a "fence" — a {@link Range} passed at open time that restricts
 * which portion of the file a {@link FencedReader} exposes. The generated test file holds
 * 4 rows x 4 families x 4 qualifiers x 4 visibilities x 4 timestamps = 1024 entries.
 */
public class FencedRFileTest extends AbstractRFileTest {

  @BeforeAll
  public static void setupCryptoKeyFile() throws Exception {
    CryptoTest.setupKeyFiles(FencedRFileTest.class);
  }

  @Test
  public void testFencingNoRange() throws IOException {
    // Test with infinite start/end range
    // Expect entire range to be seen
    assertEquals(1024, testFencing(List.of(new Range()), List.of(new Range())));
  }

  @Test
  public void testFencing1() throws IOException {
    // Test with fenced starting range at beginning and infinite end range
    // Expect entire range to be seen
    assertEquals(1024, testFencing(List.of(new Range("r_000000", null)), List.of(new Range())));
  }

  @Test
  public void testFencing2() throws IOException {
    // Test with 2 ranges that are continuous which should be merged
    // Expect entire all rows to be seen as first range end key is inclusive
    assertEquals(1024,
        testFencing(List.of(new Range(null, new Key("r_000002")), new Range("r_000002", null)),
            List.of(new Range())));
  }

  @Test
  public void testFencing3() throws IOException {
    // Create a fence that contains only row 0 row 2
    // Expect only to see values from those two rows and not row 1 or row 3
    final List<Range> ranges = List.of(new Range("r_000000"), new Range("r_000002"));
    // Use the same range for the fence and testing to make sure only the expected keys were seen
    // Should only be 512 keys as 2 rows * 256
    assertEquals(512, testFencing(ranges, ranges));
  }

  // Should fail
  @Test
  public void testFencing4() throws IOException {
    // Create a fence that contains row 0 and row 2 only
    final List<Range> ranges = List.of(new Range("r_000000"), new Range("r_000002"));
    // Expected range contains only row 2 so should fail as row 1 should also be seen
    final List<Range> ranges2 = List.of(new Range("r_000002"));
    boolean failed = false;
    try {
      testFencing(ranges, ranges2);
    } catch (AssertionError e) {
      // expected
      failed = true;
    }
    assertTrue(failed, "should have failed");
  }

  @Test
  public void testFencing5() throws IOException {
    // Test all 4 rows individually, should expect entire file
    final List<Range> ranges = List.of(new Range("r_000000"), new Range("r_000001"),
        new Range("r_000002"), new Range("r_000003"));
    assertEquals(1024, testFencing(ranges, List.of(new Range())));
  }

  @Test
  public void testFencing6() throws IOException {
    // Set range to 2.5 rows out of 4
    // Skip row 0, start row 1 and CF 2 (middle row 1), include row 3/4)
    Key start = Key.builder().row("r_000001").family("cf_000002").build();
    // Create a fence that starts at partial row 1
    final List<Range> ranges = List.of(new Range(start, true, null, true));
    // 2.5 rows equals 640 keys as each row contains 256 mutations (1024 total across all 4 rows)
    assertEquals(640, testFencing(ranges, ranges));
  }

  @Test
  public void testFencing7() throws IOException {
    // Set range to 3/4 of 1 row spanning part of row 1 and row 2
    Key start = Key.builder().row("r_000001").family("cf_000002").build();
    Key end = Key.builder().row("r_000002").family("cf_000001").build();
    // Create a fence
    final List<Range> ranges = List.of(new Range(start, true, end, true));
    // 3/4 of 1 rows equals 192 keys as each row contains 256 mutations
    assertEquals(192, testFencing(ranges, ranges));
  }

  @Test
  public void testFencing8() throws IOException {
    // Create a fence for 2 rows
    final List<Range> ranges = List.of(new Range("r_000001", true, "r_000002", true));
    // Should only be rows 1 and 2
    assertEquals(512, testFencing(ranges, ranges));
  }

  @Test
  public void testFencing9() throws IOException {
    // Test out of order ranges that should still cover whole file.
    final List<Range> ranges = List.of(new Range("r_000002"), new Range("r_000003"),
        new Range("r_000000"), new Range("r_000001"));
    assertEquals(1024, testFencing(ranges, List.of(new Range())));
  }

  @Test
  public void testFencing10() throws IOException {
    // Test overlap 2 rows that are merged
    final List<Range> ranges = Range.mergeOverlapping(
        List.of(new Range("r_000002"), new Range("r_000002", true, "r_000003", true)));
    assertEquals(512, testFencing(ranges, ranges));
  }

  @Test
  public void testFencing11() throws IOException {
    // Test fence covering just a single row
    final List<Range> ranges = List.of(new Range("r_000001"));
    // should be 256 keys in row r_000001
    assertEquals(256, testFencing(ranges, ranges));
  }

  @Test
  public void testFencing12() throws IOException {
    final TestRFile trf = initTestFile();
    // Fence off the file to contain only 1 row (r_00001)
    Range range = new Range(new Range("r_000001"));
    trf.openReader(range);
    // Open a fenced reader
    final SortedKeyValueIterator<Key,Value> iter = trf.iter;
    assertTrue(iter instanceof FencedReader);
    // Seek to the row that is part of the fence
    seek(iter, new Key(new Text("r_000001")));
    assertTrue(iter.hasTop());
    // each row has 256 keys, read 1/4 of the keys
    // and verify hasTop() is true
    for (int i = 0; i < 64; i++) {
      iter.next();
      assertTrue(iter.hasTop());
    }
    // Seek to a range that is disjoint. The fence only covers
    // row r_000001 as end row is exclusive so seeking to row r_000002
    // should result in hasTop() returning false
    seek(iter, new Key(new Text("r_000002")));
    // Verify hasTop() is now false
    assertFalse(iter.hasTop());
  }

  @Test
  public void testFirstAndLastRow() throws IOException {
    final TestRFile trf = initTestFile();
    Text firstRowInFile = new Text(formatString("r_", 0));
    Text lastRowInFile = new Text(formatString("r_", 3));
    // Infinite range fence
    // Should just be first/last rows of file
    assertReader(trf, new Range(), (reader) -> {
      assertEquals(firstRowInFile, reader.getFirstRow());
      assertEquals(lastRowInFile, reader.getLastRow());
    });
    // Range inside of file so should return the rows of the fence
    assertReader(trf, new Range("r_000001", "r_000002"), (reader) -> {
      assertEquals(new Text("r_000001"), reader.getFirstRow());
      assertEquals(new Text("r_000002"), reader.getLastRow());
    });
    // Test infinite start row
    assertReader(trf, new Range(null, "r_000001"), (reader) -> {
      assertEquals(firstRowInFile, reader.getFirstRow());
      assertEquals(new Text("r_000001"), reader.getLastRow());
    });
    // Test infinite end row
    assertReader(trf, new Range("r_000002", null), (reader) -> {
      assertEquals(new Text("r_000002"), reader.getFirstRow());
      assertEquals(lastRowInFile, reader.getLastRow());
    });
    // Test start row matches start of file
    assertReader(trf, new Range("r_000000", "r_000002"), (reader) -> {
      // start row of range matches first row in file so that should be returned instead
      assertEquals(firstRowInFile, reader.getFirstRow());
      assertEquals(new Text("r_000002"), reader.getLastRow());
    });
    // Test end row matches end of file
    assertReader(trf, new Range("r_000001", "r_000003"), (reader) -> {
      assertEquals(new Text("r_000001"), reader.getFirstRow());
      // end row of range matches last row in file so that should be returned instead
      assertEquals(lastRowInFile, reader.getLastRow());
    });
    // Test case where rows in range are less than and greater than rows in file
    assertReader(trf, new Range("a", "z"), (reader) -> {
      assertEquals(firstRowInFile, reader.getFirstRow());
      assertEquals(lastRowInFile, reader.getLastRow());
    });
    // Test inclusive end key, usually a row range is required to be an exclusive key
    // for a tablet file but the fenced reader still supports any range type
    assertReader(trf, new Range(new Key("r_000002"), true, new Key("r_000002"), true), (reader) -> {
      assertEquals(new Text("r_000002"), reader.getFirstRow());
      assertEquals(new Text("r_000002"), reader.getLastRow());
    });
  }

  @Test
  public void testUnsupportedMethods() throws IOException {
    // Operations that make no sense through a fence must throw rather than
    // silently ignore the fence.
    final TestRFile trf = initTestFile();
    trf.openReader(new Range());
    FencedReader reader = (FencedReader) trf.iter;
    FencedIndex index = (FencedIndex) reader.getIndex();
    assertThrows(UnsupportedOperationException.class, () -> reader.init(null, null, null));
    assertThrows(UnsupportedOperationException.class,
        () -> index.getSample(new SamplerConfigurationImpl()));
    assertThrows(UnsupportedOperationException.class,
        () -> index.seek(new Range(), List.of(), false));
    assertThrows(UnsupportedOperationException.class, () -> index.deepCopy(null));
  }

  @Test
  public void testSetInterrupted() throws IOException {
    // A pre-set interrupt flag should abort the next seek.
    final TestRFile trf = initTestFile();
    trf.openReader(new Range());
    FencedReader reader = (FencedReader) trf.iter;
    reader.setInterruptFlag(new AtomicBoolean(true));
    assertThrows(IterationInterruptedException.class,
        () -> reader.seek(new Range("r_000001"), List.of(), false));
  }

  @Test
  public void testReset() throws IOException {
    // reset() should return the reader to its initial (unseeked) state.
    final TestRFile trf = initTestFile();
    trf.openReader(new Range());
    FencedReader reader = (FencedReader) trf.iter;
    assertFalse(reader.hasTop());
    reader.seek(new Range("r_000001"), List.of(), false);
    assertTrue(reader.hasTop());
    assertEquals(
        newKey(formatString("r_", 1), formatString("cf_", 0), formatString("cq_", 0), "A", 4),
        reader.getTopKey());
    reader.reset();
    assertFalse(reader.hasTop());
  }

  /**
   * Builds one rfile per fence range, opens each through a {@link FencedReader}, merges them
   * with a {@link MultiIterator}, and verifies that exactly the keys inside
   * {@code expectedRange} are visible under a battery of seek patterns. Returns the number of
   * expected keys so callers can assert on the visible entry count.
   */
  private int testFencing(List<Range> fencedRange, List<Range> expectedRange) throws IOException {
    // test an rfile with multiple rows having multiple columns
    final ArrayList<Key> expectedKeys = new ArrayList<>(10000);
    final ArrayList<Value> expectedValues = new ArrayList<>(10000);
    final List<TestRFile> rangedTrfs = new ArrayList<>();
    final List<SortedKeyValueIterator<Key,Value>> rangedIters = new ArrayList<>();
    // For each range build a new test rfile and ranged reader for it
    // We have to copy the data for each range for the test
    for (Range range : fencedRange) {
      expectedKeys.clear();
      expectedValues.clear();
      final TestRFile trf = new TestRFile(conf);
      trf.openWriter();
      writeTestFile(trf, expectedKeys, expectedValues, expectedRange);
      trf.closeWriter();
      rangedTrfs.add(trf);
      trf.openReader(range);
      rangedIters.add(trf.iter);
    }
    final MultiIterator trfIter = new MultiIterator(rangedIters, false);
    // seek before everything
    trfIter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
    verify(trfIter, expectedKeys.iterator(), expectedValues.iterator());
    // seek to the middle
    int index = expectedKeys.size() / 2;
    seek(trfIter, expectedKeys.get(index));
    verify(trfIter, expectedKeys.subList(index, expectedKeys.size()).iterator(),
        expectedValues.subList(index, expectedKeys.size()).iterator());
    // seek the first key
    index = 0;
    seek(trfIter, expectedKeys.get(index));
    verify(trfIter, expectedKeys.subList(index, expectedKeys.size()).iterator(),
        expectedValues.subList(index, expectedKeys.size()).iterator());
    // seek to the last key
    index = expectedKeys.size() - 1;
    seek(trfIter, expectedKeys.get(index));
    verify(trfIter, expectedKeys.subList(index, expectedKeys.size()).iterator(),
        expectedValues.subList(index, expectedKeys.size()).iterator());
    // seek after everything
    index = expectedKeys.size();
    seek(trfIter, new Key(new Text("z")));
    verify(trfIter, expectedKeys.subList(index, expectedKeys.size()).iterator(),
        expectedValues.subList(index, expectedKeys.size()).iterator());
    // test seeking to the current location
    index = expectedKeys.size() / 2;
    seek(trfIter, expectedKeys.get(index));
    assertTrue(trfIter.hasTop());
    assertEquals(expectedKeys.get(index), trfIter.getTopKey());
    assertEquals(expectedValues.get(index), trfIter.getTopValue());
    trfIter.next();
    index++;
    assertTrue(trfIter.hasTop());
    assertEquals(expectedKeys.get(index), trfIter.getTopKey());
    assertEquals(expectedValues.get(index), trfIter.getTopValue());
    seek(trfIter, expectedKeys.get(index));
    assertTrue(trfIter.hasTop());
    assertEquals(expectedKeys.get(index), trfIter.getTopKey());
    assertEquals(expectedValues.get(index), trfIter.getTopValue());
    // test seeking to each location in the file
    index = 0;
    for (Key key : expectedKeys) {
      seek(trfIter, key);
      assertTrue(trfIter.hasTop());
      assertEquals(key, trfIter.getTopKey());
      assertEquals(expectedValues.get(index), trfIter.getTopValue());
      index++;
    }
    // test seeking backwards to each key
    for (int i = expectedKeys.size() - 1; i >= 0; i--) {
      Key key = expectedKeys.get(i);
      seek(trfIter, key);
      assertTrue(trfIter.hasTop());
      assertEquals(key, trfIter.getTopKey());
      assertEquals(expectedValues.get(i), trfIter.getTopValue());
      if (i - 1 > 0) {
        // NOTE(review): the get() below is a no-op leftover from the commented-out
        // getPrevKey() assertion; it has no effect on the test.
        // Key pkey =
        expectedKeys.get(i - 1);
        // assertEquals(pkey, trf.reader.getPrevKey());
      }
    }
    // test seeking to random location and reading all data from that point
    // there was an off by one bug with this in the transient index
    for (int i = 0; i < 12; i++) {
      index = random.nextInt(expectedKeys.size());
      seek(trfIter, expectedKeys.get(index));
      for (; index < expectedKeys.size(); index++) {
        assertTrue(trfIter.hasTop());
        assertEquals(expectedKeys.get(index), trfIter.getTopKey());
        assertEquals(expectedValues.get(index), trfIter.getTopValue());
        trfIter.next();
      }
    }
    for (TestRFile rangedTrf : rangedTrfs) {
      // check index entries to verify within range
      FileSKVIterator iiter = ((FencedReader) rangedTrf.iter).getIndex();
      while (iiter.hasTop()) {
        assertTrue(expectedRange.stream().anyMatch(range -> range.contains(iiter.getTopKey())));
        iiter.next();
      }
      rangedTrf.closeReader();
    }
    return expectedKeys.size();
  }

  // Seek the iterator to the first key at or after nk.
  private static void seek(SortedKeyValueIterator<Key,Value> iter, Key nk) throws IOException {
    iter.seek(new Range(nk, null), EMPTY_COL_FAMS, false);
  }

  // Convenience overload when the expected keys/values are not needed.
  private void writeTestFile(final TestRFile trf) throws IOException {
    writeTestFile(trf, null, null, null);
  }

  /**
   * Writes the full 1024-entry fixture (4 rows x 4 families x 4 qualifiers x 4 visibilities
   * x 4 timestamps). When {@code expectedRange} is non-null, also collects every key/value
   * falling inside one of those ranges into the provided lists.
   */
  private void writeTestFile(final TestRFile trf, final List<Key> expectedKeys,
      final List<Value> expectedValues, List<Range> expectedRange) throws IOException {
    int val = 0;
    for (int row = 0; row < 4; row++) {
      String rowS = formatString("r_", row);
      for (int cf = 0; cf < 4; cf++) {
        String cfS = formatString("cf_", cf);
        for (int cq = 0; cq < 4; cq++) {
          String cqS = formatString("cq_", cq);
          for (int cv = 'A'; cv < 'A' + 4; cv++) {
            String cvS = "" + (char) cv;
            for (int ts = 4; ts > 0; ts--) {
              Key k = newKey(rowS, cfS, cqS, cvS, ts);
              // check below ensures when all key sizes are same more than one index block is
              // created
              assertEquals(27, k.getSize());
              Value v = newValue("" + val);
              trf.writer.append(k, v);
              final Key finalK = k;
              Optional.ofNullable(expectedRange).ifPresent(expected -> {
                if (expected.stream().anyMatch(range -> range.contains(finalK))) {
                  expectedKeys.add(k);
                  expectedValues.add(v);
                }
              });
              val++;
            }
          }
        }
      }
    }
  }

  // Builds and fully writes a fresh fixture file, leaving it ready to be opened for reading.
  private TestRFile initTestFile() throws IOException {
    final TestRFile trf = new TestRFile(conf);
    trf.openWriter();
    writeTestFile(trf);
    trf.closeWriter();
    return trf;
  }

  // Opens trf fenced by range, hands the reader to run, and always closes the reader.
  private static void assertReader(final TestRFile trf, Range range,
      ThrowableConsumer<FencedReader,IOException> run) throws IOException {
    FencedReader reader = null;
    try {
      trf.openReader(range);
      reader = (FencedReader) trf.iter;
      run.accept(reader);
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }

  // Similar to the java.util.function.Consumer interface but throws an exception
  interface ThrowableConsumer<T,U extends Throwable> {
    void accept(T t) throws U;
  }
}
| 9,337 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/CreateCompatTestFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static java.nio.charset.StandardCharsets.UTF_8;

import java.util.HashSet;
import java.util.Set;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Standalone generator for an rfile used in backwards-compatibility testing. Writes two named
 * locality groups ("lg1" with cf_000001/cf_000002, "lg2" with cf_000003) and a default group
 * (cf_000004), 1000 rows each, to the path given as the first command line argument.
 */
public class CreateCompatTestFile {

  /** Builds a set of column family byte sequences from the given family names. */
  public static Set<ByteSequence> newColFamSequence(String... colFams) {
    HashSet<ByteSequence> cfs = new HashSet<>();
    for (String cf : colFams) {
      cfs.add(new ArrayByteSequence(cf));
    }
    return cfs;
  }

  private static Key newKey(String row, String cf, String cq, String cv, long ts) {
    // Fix: encode with an explicit charset. The no-arg getBytes() uses the platform
    // default charset, which could yield different file bytes on differently
    // configured hosts; UTF-8 keeps the generated compat file reproducible.
    return new Key(row.getBytes(UTF_8), cf.getBytes(UTF_8), cq.getBytes(UTF_8),
        cv.getBytes(UTF_8), ts);
  }

  private static Value newValue(String val) {
    return new Value(val);
  }

  // e.g. formatStr("r_", 5) -> "r_000005"
  private static String formatStr(String prefix, int i) {
    return String.format(prefix + "%06d", i);
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
    BCFile.Writer _cbw = new BCFile.Writer(fs.create(new Path(args[0])), null, "gz", conf,
        CryptoFactoryLoader.getServiceForServer(aconf));
    RFile.Writer writer = new RFile.Writer(_cbw, 1000);
    // lg1: two families, descending timestamps so later rows carry smaller timestamps.
    writer.startNewLocalityGroup("lg1",
        newColFamSequence(formatStr("cf_", 1), formatStr("cf_", 2)));
    for (int i = 0; i < 1000; i++) {
      writer.append(
          newKey(formatStr("r_", i), formatStr("cf_", 1), formatStr("cq_", 0), "", 1000 - i),
          newValue(i + ""));
      writer.append(
          newKey(formatStr("r_", i), formatStr("cf_", 2), formatStr("cq_", 0), "", 1000 - i),
          newValue(i + ""));
    }
    // lg2: a single family.
    writer.startNewLocalityGroup("lg2", newColFamSequence(formatStr("cf_", 3)));
    for (int i = 0; i < 1000; i++) {
      writer.append(
          newKey(formatStr("r_", i), formatStr("cf_", 3), formatStr("cq_", 0), "", 1000 - i),
          newValue(i + ""));
    }
    // Remaining family goes to the default locality group.
    writer.startDefaultLocalityGroup();
    for (int i = 0; i < 1000; i++) {
      writer.append(
          newKey(formatStr("r_", i), formatStr("cf_", 4), formatStr("cq_", 0), "", 1000 - i),
          newValue(i + ""));
    }
    writer.close();
    _cbw.close();
  }
}
| 9,338 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/RelativeKeyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.rfile.RelativeKey.SkippR;
import org.apache.accumulo.core.util.MutableByteSequence;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class RelativeKeyTest {
@Test
public void testCommonPrefix() {
// exact matches
ArrayByteSequence exact = new ArrayByteSequence("abc");
assertEquals(-1, RelativeKey.getCommonPrefix(exact, exact));
assertEquals(-1, commonPrefixHelper("", ""));
assertEquals(-1, commonPrefixHelper("a", "a"));
assertEquals(-1, commonPrefixHelper("aa", "aa"));
assertEquals(-1, commonPrefixHelper("aaa", "aaa"));
assertEquals(-1, commonPrefixHelper("abab", "abab"));
assertEquals(-1,
commonPrefixHelper(new String("aaa"), new ArrayByteSequence("aaa").toString()));
assertEquals(-1,
commonPrefixHelper("abababababab".substring(3, 6), "ccababababcc".substring(3, 6)));
// no common prefix
assertEquals(0, commonPrefixHelper("", "a"));
assertEquals(0, commonPrefixHelper("a", ""));
assertEquals(0, commonPrefixHelper("a", "b"));
assertEquals(0, commonPrefixHelper("aaaa", "bbbb"));
// some common prefix
assertEquals(1, commonPrefixHelper("a", "ab"));
assertEquals(1, commonPrefixHelper("ab", "ac"));
assertEquals(1, commonPrefixHelper("ab", "ac"));
assertEquals(2, commonPrefixHelper("aa", "aaaa"));
assertEquals(4, commonPrefixHelper("aaaaa", "aaaab"));
}
private int commonPrefixHelper(String a, String b) {
return RelativeKey.getCommonPrefix(new ArrayByteSequence(a), new ArrayByteSequence(b));
}
@Test
public void testReadWritePrefix() throws IOException {
Key prevKey = new Key("row1", "columnfamily1", "columnqualifier1", "columnvisibility1", 1000);
Key newKey = new Key("row2", "columnfamily2", "columnqualifier2", "columnvisibility2", 3000);
RelativeKey expected = new RelativeKey(prevKey, newKey);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos);
expected.write(out);
RelativeKey actual = new RelativeKey();
actual.setPrevKey(prevKey);
actual.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
assertEquals(expected.getKey(), actual.getKey());
}
private static ArrayList<Key> expectedKeys;
private static ArrayList<Value> expectedValues;
private static ArrayList<Integer> expectedPositions;
private static ByteArrayOutputStream baos;
@BeforeAll
public static void initSource() throws IOException {
int initialListSize = 10000;
baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos);
expectedKeys = new ArrayList<>(initialListSize);
expectedValues = new ArrayList<>(initialListSize);
expectedPositions = new ArrayList<>(initialListSize);
Key prev = null;
int val = 0;
for (int row = 0; row < 4; row++) {
String rowS = RFileTest.formatString("r_", row);
for (int cf = 0; cf < 4; cf++) {
String cfS = RFileTest.formatString("cf_", cf);
for (int cq = 0; cq < 4; cq++) {
String cqS = RFileTest.formatString("cq_", cq);
for (int cv = 'A'; cv < 'A' + 4; cv++) {
String cvS = "" + (char) cv;
for (int ts = 4; ts > 0; ts--) {
Key k = RFileTest.newKey(rowS, cfS, cqS, cvS, ts);
k.setDeleted(true);
Value v = RFileTest.newValue("" + val);
expectedPositions.add(out.size());
new RelativeKey(prev, k).write(out);
prev = k;
v.write(out);
expectedKeys.add(k);
expectedValues.add(v);
k = RFileTest.newKey(rowS, cfS, cqS, cvS, ts);
v = RFileTest.newValue("" + val);
expectedPositions.add(out.size());
new RelativeKey(prev, k).write(out);
prev = k;
v.write(out);
expectedKeys.add(k);
expectedValues.add(v);
val++;
}
}
}
}
}
}
private DataInputStream in;
@BeforeEach
public void setupDataInputStream() {
in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
in.mark(0);
}
@Test
public void testSeekBeforeEverything() throws IOException {
Key seekKey = new Key();
Key prevKey = new Key();
Key currKey = null;
MutableByteSequence value = new MutableByteSequence(new byte[64], 0, 0);
RelativeKey.SkippR skippr =
RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey, expectedKeys.size());
assertEquals(1, skippr.skipped);
assertEquals(new Key(), skippr.prevKey);
assertEquals(expectedKeys.get(0), skippr.rk.getKey());
assertEquals(expectedValues.get(0).toString(), value.toString());
// ensure we can advance after fastskip
skippr.rk.readFields(in);
assertEquals(expectedKeys.get(1), skippr.rk.getKey());
in.reset();
seekKey = new Key("a", "b", "c", "d", 1);
seekKey.setDeleted(true);
skippr = RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey, expectedKeys.size());
assertEquals(1, skippr.skipped);
assertEquals(new Key(), skippr.prevKey);
assertEquals(expectedKeys.get(0), skippr.rk.getKey());
assertEquals(expectedValues.get(0).toString(), value.toString());
skippr.rk.readFields(in);
assertEquals(expectedKeys.get(1), skippr.rk.getKey());
}
@Test
public void testSeekAfterEverythingWrongCount() {
Key seekKey = new Key("s", "t", "u", "v", 1);
Key prevKey = new Key();
Key currKey = null;
MutableByteSequence value = new MutableByteSequence(new byte[64], 0, 0);
assertThrows(EOFException.class,
() -> RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey, expectedKeys.size() + 1));
}
@Test
public void testSeekAfterEverything() throws IOException {
Key seekKey = new Key("s", "t", "u", "v", 1);
Key prevKey = new Key();
Key currKey = null;
MutableByteSequence value = new MutableByteSequence(new byte[64], 0, 0);
SkippR skippr = RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey, expectedKeys.size());
assertEquals(expectedKeys.size(), skippr.skipped);
}
@Test
public void testSeekMiddle() throws IOException {
int seekIndex = expectedKeys.size() / 2;
Key seekKey = expectedKeys.get(seekIndex);
Key prevKey = new Key();
Key currKey = null;
MutableByteSequence value = new MutableByteSequence(new byte[64], 0, 0);
RelativeKey.SkippR skippr =
RelativeKey.fastSkip(in, seekKey, value, prevKey, currKey, expectedKeys.size());
assertEquals(seekIndex + 1, skippr.skipped);
assertEquals(expectedKeys.get(seekIndex - 1), skippr.prevKey);
assertEquals(expectedKeys.get(seekIndex), skippr.rk.getKey());
assertEquals(expectedValues.get(seekIndex).toString(), value.toString());
skippr.rk.readFields(in);
assertEquals(expectedValues.get(seekIndex + 1).toString(), value.toString());
// try fast skipping to a key that does not exist
in.reset();
Key fKey = expectedKeys.get(seekIndex).followingKey(PartialKey.ROW_COLFAM_COLQUAL);
int i;
for (i = seekIndex; expectedKeys.get(i).compareTo(fKey) < 0; i++) {}
int left = expectedKeys.size();
skippr =
RelativeKey.fastSkip(in, expectedKeys.get(i), value, prevKey, currKey, expectedKeys.size());
assertEquals(i + 1, skippr.skipped);
left -= skippr.skipped;
assertEquals(expectedKeys.get(i - 1), skippr.prevKey);
assertEquals(expectedKeys.get(i), skippr.rk.getKey());
assertEquals(expectedValues.get(i).toString(), value.toString());
// try fast skipping to our current location
skippr = RelativeKey.fastSkip(in, expectedKeys.get(i), value, expectedKeys.get(i - 1),
expectedKeys.get(i), left);
assertEquals(0, skippr.skipped);
assertEquals(expectedKeys.get(i - 1), skippr.prevKey);
assertEquals(expectedKeys.get(i), skippr.rk.getKey());
assertEquals(expectedValues.get(i).toString(), value.toString());
// try fast skipping 1 column family ahead from our current location, testing fastskip from
// middle of block as opposed to stating at beginning of block
fKey = expectedKeys.get(i).followingKey(PartialKey.ROW_COLFAM);
int j;
for (j = i; expectedKeys.get(j).compareTo(fKey) < 0; j++) {}
skippr =
RelativeKey.fastSkip(in, fKey, value, expectedKeys.get(i - 1), expectedKeys.get(i), left);
assertEquals(j - i, skippr.skipped);
assertEquals(expectedKeys.get(j - 1), skippr.prevKey);
assertEquals(expectedKeys.get(j), skippr.rk.getKey());
assertEquals(expectedValues.get(j).toString(), value.toString());
}
}
| 9,339 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/BlockIndexTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.function.Supplier;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
import org.apache.accumulo.core.file.rfile.BlockIndex.BlockIndexEntry;
import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
import org.apache.accumulo.core.spi.cache.CacheEntry;
import org.junit.jupiter.api.Test;
public class BlockIndexTest {
  // Minimal in-memory CacheEntry: the buffer is the serialized block, and
  // getIndex lazily builds and caches whatever index object the supplier
  // creates (BlockIndex in these tests).
  private static class MyCacheEntry implements CacheEntry {
    Object idx;
    byte[] data;
    MyCacheEntry(byte[] d) {
      this.data = d;
    }
    @SuppressWarnings("unchecked")
    @Override
    public <T extends Weighable> T getIndex(Supplier<T> indexSupplier) {
      // Build the index on first request only; later calls return the cached one.
      if (idx == null) {
        idx = indexSupplier.get();
      }
      return (T) idx;
    }
    @Override
    public byte[] getBuffer() {
      return data;
    }
    @Override
    public void indexWeightChanged() {}
  }
  // Serializes 1000 distinct keys as RelativeKey/Value pairs, builds a
  // BlockIndex over the cached block, and verifies that seekBlock positions
  // strictly before each sought key (so the caller can scan forward to it).
  @Test
  public void test1() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    Key prevKey = null;
    int num = 1000;
    for (int i = 0; i < num; i++) {
      Key key = new Key(RFileTest.formatString("", i), "cf1", "cq1");
      new RelativeKey(prevKey, key).write(out);
      new Value().write(out);
      prevKey = key;
    }
    out.close();
    final byte[] data = baos.toByteArray();
    CacheEntry ce = new MyCacheEntry(data);
    CachableBlockFile.CachedBlockRead cacheBlock = new CachableBlockFile.CachedBlockRead(ce, data);
    BlockIndex blockIndex = null;
    // getIndex only populates index entries after the block has been accessed
    // repeatedly; 129 calls crosses that threshold — TODO confirm against
    // BlockIndex internals if the magic count ever changes.
    for (int i = 0; i < 129; i++) {
      blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, num, 0, 0, 0));
    }
    BlockIndexEntry[] indexEntries = blockIndex.getIndexEntries();
    for (int i = 0; i < indexEntries.length; i++) {
      int row = Integer.parseInt(indexEntries[i].getPrevKey().getRowData().toString());
      BlockIndexEntry bie;
      // Seeking to the entry's own prev key must land on the PREVIOUS index
      // entry (or null for the first), never at-or-after the sought key.
      bie =
          blockIndex.seekBlock(new Key(RFileTest.formatString("", row), "cf1", "cq1"), cacheBlock);
      if (i == 0) {
        assertSame(null, bie);
      } else {
        assertSame(indexEntries[i - 1], bie);
      }
      // One row earlier resolves to the same (previous) entry.
      assertSame(bie, blockIndex
          .seekBlock(new Key(RFileTest.formatString("", row - 1), "cf1", "cq1"), cacheBlock));
      // One row later lands on this entry.
      bie = blockIndex.seekBlock(new Key(RFileTest.formatString("", row + 1), "cf1", "cq1"),
          cacheBlock);
      assertSame(indexEntries[i], bie);
      // seekBlock also positions the stream so the next RelativeKey read
      // yields the key immediately after the entry's prev key.
      RelativeKey rk = new RelativeKey();
      rk.setPrevKey(bie.getPrevKey());
      rk.readFields(cacheBlock);
      assertEquals(rk.getKey(), new Key(RFileTest.formatString("", row + 1), "cf1", "cq1"));
    }
    cacheBlock.close();
  }
  // Same setup but with long runs of duplicate keys (rows 1, 3, 5 repeated
  // 1000 times each): seekBlock must still return a position strictly before
  // the sought key even when index entries share identical keys.
  @Test
  public void testSame() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    Key prevKey = null;
    int num = 1000;
    for (int i = 0; i < num; i++) {
      Key key = new Key(RFileTest.formatString("", 1), "cf1", "cq1");
      new RelativeKey(prevKey, key).write(out);
      new Value().write(out);
      prevKey = key;
    }
    for (int i = 0; i < num; i++) {
      Key key = new Key(RFileTest.formatString("", 3), "cf1", "cq1");
      new RelativeKey(prevKey, key).write(out);
      new Value().write(out);
      prevKey = key;
    }
    for (int i = 0; i < num; i++) {
      Key key = new Key(RFileTest.formatString("", 5), "cf1", "cq1");
      new RelativeKey(prevKey, key).write(out);
      new Value().write(out);
      prevKey = key;
    }
    out.close();
    final byte[] data = baos.toByteArray();
    CacheEntry ce = new MyCacheEntry(data);
    CachableBlockFile.CachedBlockRead cacheBlock = new CachableBlockFile.CachedBlockRead(ce, data);
    BlockIndex blockIndex = null;
    // Access-count threshold again; 257 iterations ensures the index is built
    // for this larger block — TODO confirm against BlockIndex internals.
    for (int i = 0; i < 257; i++) {
      blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, num, 0, 0, 0));
    }
    // Keys at or before the first run (rows 0 and 1) have no usable entry.
    assertSame(null,
        blockIndex.seekBlock(new Key(RFileTest.formatString("", 0), "cf1", "cq1"), cacheBlock));
    assertSame(null,
        blockIndex.seekBlock(new Key(RFileTest.formatString("", 1), "cf1", "cq1"), cacheBlock));
    for (int i = 2; i < 6; i++) {
      Key seekKey = new Key(RFileTest.formatString("", i), "cf1", "cq1");
      BlockIndexEntry bie = blockIndex.seekBlock(seekKey, cacheBlock);
      // The returned position's prev key is strictly before the sought key,
      // and the next key read from that position is at or before it.
      assertTrue(bie.getPrevKey().compareTo(seekKey) < 0);
      RelativeKey rk = new RelativeKey();
      rk.setPrevKey(bie.getPrevKey());
      rk.readFields(cacheBlock);
      assertTrue(rk.getKey().compareTo(seekKey) <= 0);
    }
    cacheBlock.close();
  }
}
| 9,340 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/AbstractRFileTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.accumulo.core.client.sample.Sampler;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheConfiguration;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheManagerFactory;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCache;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheManager;
import org.apache.accumulo.core.file.blockfile.impl.BasicCacheProvider;
import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
import org.apache.accumulo.core.file.rfile.RFile.FencedReader;
import org.apache.accumulo.core.file.rfile.RFile.Reader;
import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iteratorsImpl.system.ColumnFamilySkippingIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
import org.apache.accumulo.core.sample.impl.SamplerFactory;
import org.apache.accumulo.core.spi.cache.BlockCacheManager;
import org.apache.accumulo.core.spi.cache.CacheType;
import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
import org.apache.accumulo.core.spi.crypto.CryptoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
public abstract class AbstractRFileTest {

  protected static final SecureRandom random = new SecureRandom();
  protected static final Collection<ByteSequence> EMPTY_COL_FAMS = List.of();

  protected AccumuloConfiguration conf = null;

  /**
   * Harness that writes an RFile to an in-memory byte array and reads it back,
   * optionally wrapping the reader in a column-family-skipping iterator or a
   * fenced (range-restricted) reader.
   */
  public static class TestRFile {

    protected Configuration conf = new Configuration();
    public RFile.Writer writer;
    protected ByteArrayOutputStream baos;
    protected FSDataOutputStream dos;
    protected SeekableByteArrayInputStream bais;
    protected FSDataInputStream in;
    protected AccumuloConfiguration accumuloConfiguration;
    public Reader reader;
    public SortedKeyValueIterator<Key,Value> iter;
    private BlockCacheManager manager;

    public TestRFile(AccumuloConfiguration accumuloConfiguration) {
      this.accumuloConfiguration = accumuloConfiguration;
      // Fall back to defaults so callers may pass null.
      if (this.accumuloConfiguration == null) {
        this.accumuloConfiguration = DefaultConfiguration.getInstance();
      }
    }

    public void openWriter(boolean startDLG) throws IOException {
      openWriter(startDLG, 1000);
    }

    /**
     * Opens a gzip-compressed RFile writer over an in-memory stream.
     *
     * @param startDLG whether to immediately start the default locality group
     * @param blockSize data block size passed to the RFile writer
     */
    public void openWriter(boolean startDLG, int blockSize) throws IOException {
      baos = new ByteArrayOutputStream();
      dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
      CryptoService cs = CryptoFactoryLoader.getServiceForClient(CryptoEnvironment.Scope.TABLE,
          accumuloConfiguration.getAllCryptoProperties());
      BCFile.Writer _cbw = new BCFile.Writer(dos, null, "gz", conf, cs);

      SamplerConfigurationImpl samplerConfig =
          SamplerConfigurationImpl.newSamplerConfig(accumuloConfiguration);
      Sampler sampler = null;
      if (samplerConfig != null) {
        sampler = SamplerFactory.newSampler(samplerConfig, accumuloConfiguration);
      }

      writer = new RFile.Writer(_cbw, blockSize, 1000, samplerConfig, sampler);

      if (startDLG) {
        writer.startDefaultLocalityGroup();
      }
    }

    public void openWriter() throws IOException {
      openWriter(1000);
    }

    public void openWriter(int blockSize) throws IOException {
      openWriter(true, blockSize);
    }

    public void closeWriter() throws IOException {
      dos.flush();
      writer.close();
      dos.close();
      if (baos != null) {
        baos.close();
      }
    }

    public void openReader(Range fence) throws IOException {
      openReader(true, fence);
    }

    public void openReader() throws IOException {
      openReader(true);
    }

    public void openReader(boolean cfsi) throws IOException {
      openReader(cfsi, null);
    }

    /**
     * Opens a reader over the bytes produced by the writer, backed by LRU
     * index/data caches.
     *
     * @param cfsi wrap the reader in a ColumnFamilySkippingIterator
     * @param fence if non-null, wrap the reader in a FencedReader over this range
     *        (this wrapper replaces the cfsi wrapper when both are requested)
     */
    public void openReader(boolean cfsi, Range fence) throws IOException {
      byte[] data = baos.toByteArray();
      bais = new SeekableByteArrayInputStream(data);
      in = new FSDataInputStream(bais);
      int fileLength = data.length;

      DefaultConfiguration dc = DefaultConfiguration.getInstance();
      ConfigurationCopy cc = new ConfigurationCopy(dc);
      cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
      try {
        manager = BlockCacheManagerFactory.getInstance(cc);
      } catch (ReflectiveOperationException e) {
        throw new IllegalStateException("Error creating BlockCacheManager", e);
      }
      cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(100000));
      cc.set(Property.TSERV_DATACACHE_SIZE, Long.toString(100000000));
      cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(100000000));
      manager.start(BlockCacheConfiguration.forTabletServer(cc));
      LruBlockCache indexCache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
      LruBlockCache dataCache = (LruBlockCache) manager.getBlockCache(CacheType.DATA);

      CryptoService cs = CryptoFactoryLoader.getServiceForClient(CryptoEnvironment.Scope.TABLE,
          accumuloConfiguration.getAllCryptoProperties());

      CachableBuilder cb = new CachableBuilder().input(in, "source-1").length(fileLength).conf(conf)
          .cacheProvider(new BasicCacheProvider(indexCache, dataCache)).cryptoService(cs);
      reader = new RFile.Reader(cb);
      if (cfsi) {
        iter = new ColumnFamilySkippingIterator(reader);
      }
      if (fence != null) {
        iter = new FencedReader(reader, fence);
      }
      checkIndex(reader);
    }

    public void closeReader() throws IOException {
      reader.close();
      in.close();
      if (null != manager) {
        manager.stop();
      }
    }

    public void seek(Key nk) throws IOException {
      iter.seek(new Range(nk, null), EMPTY_COL_FAMS, false);
    }
  }

  /**
   * Sanity-checks the reader's index: the index keys must be sorted and must
   * lie between the reader's first and last rows.
   */
  protected static void checkIndex(Reader reader) throws IOException {
    FileSKVIterator indexIter = reader.getIndex();

    if (indexIter.hasTop()) {
      Key lastKey = new Key(indexIter.getTopKey());

      if (reader.getFirstRow().compareTo(lastKey.getRow()) > 0) {
        throw new IllegalStateException(
            "First key out of order " + reader.getFirstRow() + " " + lastKey);
      }

      indexIter.next();

      while (indexIter.hasTop()) {
        if (lastKey.compareTo(indexIter.getTopKey()) > 0) {
          // Fixed message typo ("Indext" -> "Index")
          throw new IllegalStateException(
              "Index out of order " + lastKey + " " + indexIter.getTopKey());
        }
        lastKey = new Key(indexIter.getTopKey());
        indexIter.next();
      }

      if (!reader.getLastRow().equals(lastKey.getRow())) {
        throw new IllegalStateException(
            "Last key out of order " + reader.getLastRow() + " " + lastKey);
      }
    }
  }

  /**
   * Builds a key from UTF-8 encoded components. An explicit charset is used so
   * behavior does not depend on the platform default encoding.
   */
  static Key newKey(String row, String cf, String cq, String cv, long ts) {
    return new Key(row.getBytes(StandardCharsets.UTF_8), cf.getBytes(StandardCharsets.UTF_8),
        cq.getBytes(StandardCharsets.UTF_8), cv.getBytes(StandardCharsets.UTF_8), ts);
  }

  static Value newValue(String val) {
    return new Value(val);
  }

  /** Formats {@code prefix} followed by a zero-padded six digit number. */
  static String formatString(String prefix, int i) {
    return String.format(prefix + "%06d", i);
  }

  protected void verify(TestRFile trf, Iterator<Key> eki, Iterator<Value> evi) throws IOException {
    verify(trf.iter, eki, evi);
  }

  /**
   * Drains {@code iter} and asserts it yields exactly the expected key/value
   * sequence, with no entries left over on either side.
   */
  protected void verify(SortedKeyValueIterator<Key,Value> iter, Iterator<Key> eki,
      Iterator<Value> evi) throws IOException {
    while (iter.hasTop()) {
      Key ek = eki.next();
      Value ev = evi.next();
      assertEquals(ek, iter.getTopKey());
      assertEquals(ev, iter.getTopValue());
      iter.next();
    }
    assertFalse(eki.hasNext());
    assertFalse(evi.hasNext());
  }
}
| 9,341 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/file/rfile/bcfile/CompressionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile.bcfile;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.accumulo.core.spi.file.rfile.compression.Bzip2;
import org.apache.accumulo.core.spi.file.rfile.compression.Gz;
import org.apache.accumulo.core.spi.file.rfile.compression.Lz4;
import org.apache.accumulo.core.spi.file.rfile.compression.Lzo;
import org.apache.accumulo.core.spi.file.rfile.compression.Snappy;
import org.apache.accumulo.core.spi.file.rfile.compression.ZStandard;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
public class CompressionTest {

  // Algorithms whose codec class loaded successfully on this platform; ones
  // whose codec class is absent are simply never added to the map.
  HashMap<CompressionAlgorithm,Boolean> isSupported = new HashMap<>();

  /**
   * Probes each known compression codec. Gzip ships with Hadoop, so it is
   * instantiated outside any try/catch and a missing class fails setup loudly;
   * the optional codecs (lzo, lz4, bzip2, snappy, zstd) are registered only if
   * their classes are present.
   */
  @BeforeEach
  public void testSupport() throws ClassNotFoundException {
    Configuration myConf = new Configuration();
    Gz gz = new Gz();
    String extClazz = gz.getCodecClassNameProperty();
    String clazz = (extClazz != null) ? extClazz : gz.getCodecClassName();
    CompressionCodec codec =
        (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
    assertNotNull(codec);
    isSupported.put(new CompressionAlgorithm(gz, myConf), true);

    Lzo lzo = new Lzo();
    extClazz = lzo.getCodecClassNameProperty();
    clazz = (extClazz != null) ? extClazz : lzo.getCodecClassName();
    try {
      codec = (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
      assertNotNull(codec);
      isSupported.put(new CompressionAlgorithm(lzo, myConf), true);
    } catch (ClassNotFoundException e) {
      // that is okay
    }

    Lz4 lz4 = new Lz4();
    extClazz = lz4.getCodecClassNameProperty();
    clazz = (extClazz != null) ? extClazz : lz4.getCodecClassName();
    try {
      codec = (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
      assertNotNull(codec);
      isSupported.put(new CompressionAlgorithm(lz4, myConf), true);
    } catch (ClassNotFoundException e) {
      // that is okay
    }

    Bzip2 bzip = new Bzip2();
    extClazz = bzip.getCodecClassNameProperty();
    clazz = (extClazz != null) ? extClazz : bzip.getCodecClassName();
    try {
      codec = (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
      assertNotNull(codec);
      isSupported.put(new CompressionAlgorithm(bzip, myConf), true);
    } catch (ClassNotFoundException e) {
      // that is okay
    }

    Snappy snappy = new Snappy();
    extClazz = snappy.getCodecClassNameProperty();
    clazz = (extClazz != null) ? extClazz : snappy.getCodecClassName();
    try {
      codec = (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
      assertNotNull(codec);
      isSupported.put(new CompressionAlgorithm(snappy, myConf), true);
    } catch (ClassNotFoundException e) {
      // that is okay
    }

    ZStandard zstd = new ZStandard();
    extClazz = zstd.getCodecClassNameProperty();
    clazz = (extClazz != null) ? extClazz : zstd.getCodecClassName();
    try {
      codec = (CompressionCodec) ReflectionUtils.newInstance(Class.forName(clazz), myConf);
      assertNotNull(codec);
      isSupported.put(new CompressionAlgorithm(zstd, myConf), true);
    } catch (ClassNotFoundException e) {
      // that is okay
    }
  }

  @Test
  public void testSingle() {
    for (final String name : Compression.getSupportedAlgorithms()) {
      CompressionAlgorithm al = Compression.getCompressionAlgorithmByName(name);
      if (isSupported.getOrDefault(al, false)) {
        // first call to isSupported should be true
        assertTrue(al.isSupported(), al + " is not supported, but should be");
        // two getCodec calls verify repeated lookups keep working
        assertNotNull(al.getCodec(), al + " should have a non-null codec");
        assertNotNull(al.getCodec(), al + " should have a non-null codec");
      }
    }
  }

  @Test
  public void testSingleNoSideEffect() {
    for (final String name : Compression.getSupportedAlgorithms()) {
      CompressionAlgorithm al = Compression.getCompressionAlgorithmByName(name);
      if (isSupported.getOrDefault(al, false)) {
        assertTrue(al.isSupported(), al + " is not supported, but should be");
        assertNotNull(al.getCodec(), al + " should have a non-null codec");
        // createNewCodec must return a fresh instance, distinct from the
        // cached codec returned by getCodec(). BUGFIX: the original compared
        // an identity hash code (int) to a codec object, which is never equal
        // and so asserted nothing; compare identity hash codes on both sides.
        assertNotEquals(System.identityHashCode(al.getCodec()),
            System.identityHashCode(al.createNewCodec(88 * 1024)),
            al + " should have created a new codec, but did not");
      }
    }
  }

  /**
   * Concurrently calls getCodec from a thread pool after the codec has already
   * been created on the main thread; every call must return a non-null codec.
   */
  @Test
  @Timeout(60)
  public void testManyStartNotNull() throws InterruptedException, ExecutionException {
    for (final String name : Compression.getSupportedAlgorithms()) {
      CompressionAlgorithm al = Compression.getCompressionAlgorithmByName(name);
      if (isSupported.getOrDefault(al, false)) {
        // first call to isSupported should be true
        assertTrue(al.isSupported(), al + " is not supported, but should be");
        final CompressionCodec codec = al.getCodec();
        assertNotNull(codec, al + " should not be null");
        ExecutorService service = Executors.newFixedThreadPool(10);
        ArrayList<Future<Boolean>> results = new ArrayList<>();
        for (int i = 0; i < 30; i++) {
          results.add(service.submit(() -> {
            assertNotNull(al.getCodec(), al + " should not be null");
            return true;
          }));
        }
        service.shutdown();
        assertNotNull(codec, al + " should not be null");
        while (!service.awaitTermination(1, SECONDS)) {
          // wait
        }
        for (Future<Boolean> result : results) {
          assertTrue(result.get(),
              al + " resulted in a failed call to getcodec within the thread pool");
        }
      }
    }
  }

  // don't start until we have created the codec
  @Test
  @Timeout(60)
  public void testManyDontStartUntilThread() throws InterruptedException, ExecutionException {
    for (final String name : Compression.getSupportedAlgorithms()) {
      CompressionAlgorithm al = Compression.getCompressionAlgorithmByName(name);
      if (isSupported.getOrDefault(al, false)) {
        // first call to isSupported should be true
        assertTrue(al.isSupported(), al + " is not supported, but should be");
        // Unlike testManyStartNotNull, the first getCodec call happens inside
        // the pool threads, exercising concurrent lazy initialization.
        ExecutorService service = Executors.newFixedThreadPool(10);
        ArrayList<Future<Boolean>> results = new ArrayList<>();
        for (int i = 0; i < 30; i++) {
          results.add(service.submit(() -> {
            assertNotNull(al.getCodec(), al + " should have a non-null codec");
            return true;
          }));
        }
        service.shutdown();
        while (!service.awaitTermination(1, SECONDS)) {
          // wait
        }
        for (Future<Boolean> result : results) {
          assertTrue(result.get(),
              al + " resulted in a failed call to getcodec within the thread pool");
        }
      }
    }
  }

  /**
   * Hammers getCodec from 20 threads and asserts that exactly one codec
   * instance is ever observed (identity hash codes collected into a set).
   */
  @Test
  @Timeout(60)
  public void testThereCanBeOnlyOne() throws InterruptedException, ExecutionException {
    for (final String name : Compression.getSupportedAlgorithms()) {
      CompressionAlgorithm al = Compression.getCompressionAlgorithmByName(name);
      if (isSupported.getOrDefault(al, false)) {
        // first call to isSupported should be true
        assertTrue(al.isSupported(), al + " is not supported, but should be");
        ExecutorService service = Executors.newFixedThreadPool(20);
        ArrayList<Callable<Boolean>> list = new ArrayList<>();
        ArrayList<Future<Boolean>> results = new ArrayList<>();
        // keep track of the system's identity hashcodes.
        final HashSet<Integer> testSet = new HashSet<>();
        for (int i = 0; i < 40; i++) {
          list.add(() -> {
            CompressionCodec codec = al.getCodec();
            assertNotNull(codec, al + " resulted in a non-null codec");
            // add the identity hashcode to the set.
            synchronized (testSet) {
              testSet.add(System.identityHashCode(codec));
            }
            return true;
          });
        }
        results.addAll(service.invokeAll(list));
        // a singleton codec means the set contains exactly one hash code
        assertEquals(1, testSet.size(), al + " created too many codecs");
        service.shutdown();
        while (!service.awaitTermination(1, SECONDS)) {
          // wait
        }
        for (Future<Boolean> result : results) {
          assertTrue(result.get(),
              al + " resulted in a failed call to getcodec within the thread pool");
        }
      }
    }
  }

  /** Hadoop Configuration property can override the codec implementation class. */
  @Test
  public void testHadoopCodecOverride() {
    Configuration conf = new Configuration(false);
    conf.set(new ZStandard().getCodecClassNameProperty(), DummyCodec.class.getName());
    CompressionAlgorithm algo = Compression.getCompressionAlgorithmByName("zstd");
    algo.setConf(conf);
    CompressionCodec dummyCodec = algo.createNewCodec(4096);
    assertEquals(DummyCodec.class, dummyCodec.getClass(), "Hadoop override DummyCodec not loaded");
  }

  /** A JVM system property can override the codec implementation class. */
  @Test
  public void testSystemPropertyCodecOverride() {
    System.setProperty(new Lz4().getCodecClassNameProperty(), DummyCodec.class.getName());
    try {
      CompressionAlgorithm algo = Compression.getCompressionAlgorithmByName("lz4");
      CompressionCodec dummyCodec = algo.createNewCodec(4096);
      assertEquals(DummyCodec.class, dummyCodec.getClass(),
          "Hadoop override DummyCodec not loaded");
    } finally {
      // always restore global state so other tests are unaffected
      System.clearProperty(new Lz4().getCodecClassNameProperty());
    }
  }

  /** The system property takes precedence over the Hadoop Configuration value. */
  @Test
  public void testSystemPropertyOverridesConf() {
    System.setProperty(new Snappy().getCodecClassNameProperty(), DummyCodec.class.getName());
    try {
      Configuration conf = new Configuration(false);
      conf.set(new Snappy().getCodecClassNameProperty(), SnappyCodec.class.getName());
      CompressionAlgorithm algo = Compression.getCompressionAlgorithmByName("snappy");
      algo.setConf(conf);
      CompressionCodec dummyCodec = algo.createNewCodec(4096);
      assertEquals(DummyCodec.class, dummyCodec.getClass(),
          "Hadoop override DummyCodec not loaded");
    } finally {
      // always restore global state so other tests are unaffected
      System.clearProperty(new Snappy().getCodecClassNameProperty());
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.rfile.bcfile;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
/**
 * A do-nothing {@link CompressionCodec} used by tests to verify that codec
 * class-name overrides (Hadoop configuration and system properties) are
 * honored. Every compression/decompression operation throws
 * {@link UnsupportedOperationException}; only the {@link Configurable} methods
 * and {@link #getDefaultExtension()} are functional.
 */
public class DummyCodec implements Configurable, CompressionCodec {
  // Stored only so getConf() can return it; never otherwise read by this stub.
  Configuration conf;
  /**
   * Set the configuration to be used by this object.
   *
   * @param conf the configuration object.
   */
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }
  /**
   * Return the configuration used by this object.
   *
   * @return the configuration object used by this object.
   */
  @Override
  public Configuration getConf() {
    return conf;
  }
  /**
   * Unsupported in this test stub.
   *
   * @param out the location for the final output stream
   * @return never returns normally
   * @throws UnsupportedOperationException always; this codec cannot compress
   */
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out) throws IOException {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @param out the location for the final output stream
   * @param compressor compressor to use
   * @return never returns normally
   * @throws UnsupportedOperationException always; this codec cannot compress
   */
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor)
      throws IOException {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @return never returns normally
   * @throws UnsupportedOperationException always
   */
  @Override
  public Class<? extends Compressor> getCompressorType() {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @return never returns normally
   * @throws UnsupportedOperationException always
   */
  @Override
  public Compressor createCompressor() {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @param in the stream to read compressed bytes from
   * @return never returns normally
   * @throws UnsupportedOperationException always; this codec cannot decompress
   */
  @Override
  public CompressionInputStream createInputStream(InputStream in) throws IOException {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @param in the stream to read compressed bytes from
   * @param decompressor decompressor to use
   * @return never returns normally
   * @throws UnsupportedOperationException always; this codec cannot decompress
   */
  @Override
  public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor)
      throws IOException {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @return never returns normally
   * @throws UnsupportedOperationException always
   */
  @Override
  public Class<? extends Decompressor> getDecompressorType() {
    throw new UnsupportedOperationException();
  }
  /**
   * Unsupported in this test stub.
   *
   * @return never returns normally
   * @throws UnsupportedOperationException always
   */
  @Override
  public Decompressor createDecompressor() {
    throw new UnsupportedOperationException();
  }
  /**
   * Get the default filename extension for this kind of compression.
   *
   * @return <code>.dummy</code>.
   */
  @Override
  public String getDefaultExtension() {
    return ".dummy";
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.blockfile.cache;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheConfiguration;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheManagerFactory;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheManager;
import org.apache.accumulo.core.file.blockfile.cache.tinylfu.TinyLfuBlockCacheManager;
import org.apache.accumulo.core.spi.cache.BlockCacheManager;
import org.apache.accumulo.core.spi.cache.CacheType;
import org.junit.jupiter.api.Test;
/**
 * Verifies that {@link BlockCacheManagerFactory} can instantiate each shipped
 * {@link BlockCacheManager} implementation, including the default one.
 */
public class BlockCacheFactoryTest {

  /** The LRU cache manager class named in config should be instantiable. */
  @Test
  public void testCreateLruBlockCacheFactory() throws Exception {
    ConfigurationCopy conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
    conf.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManagerFactory.getInstance(conf);
  }

  /** The TinyLFU cache manager class named in config should be instantiable. */
  @Test
  public void testCreateTinyLfuBlockCacheFactory() throws Exception {
    ConfigurationCopy conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
    conf.set(Property.TSERV_CACHE_MANAGER_IMPL, TinyLfuBlockCacheManager.class.getName());
    BlockCacheManagerFactory.getInstance(conf);
  }

  /** With no explicit impl configured, the default manager starts and serves caches. */
  @Test
  public void testStartWithDefault() throws Exception {
    DefaultConfiguration defaults = DefaultConfiguration.getInstance();
    BlockCacheManager defaultManager = BlockCacheManagerFactory.getInstance(defaults);
    defaultManager.start(BlockCacheConfiguration.forTabletServer(defaults));
    assertNotNull(defaultManager.getBlockCache(CacheType.INDEX));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.blockfile.cache;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheConfiguration;
import org.apache.accumulo.core.file.blockfile.cache.impl.BlockCacheManagerFactory;
import org.apache.accumulo.core.file.blockfile.cache.impl.ClassSize;
import org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock;
import org.apache.accumulo.core.file.blockfile.cache.lru.HeapSize;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCache;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheConfiguration;
import org.apache.accumulo.core.file.blockfile.cache.lru.LruBlockCacheManager;
import org.apache.accumulo.core.spi.cache.BlockCacheManager;
import org.apache.accumulo.core.spi.cache.CacheEntry;
import org.apache.accumulo.core.spi.cache.CacheType;
import org.junit.jupiter.api.Test;
/**
* Tests the concurrent LruBlockCache.
* <p>
*
* Tests will ensure it grows and shrinks in size properly, evictions run when they're supposed to
* and do what they should, and that cached blocks are accessible when expected to be.
*/
public class TestLruBlockCache {

  /**
   * Verifies that cache-type-specific LRU settings are read, that they override
   * the ".default." prefixed settings, and that unset type-specific settings
   * fall back to the defaults.
   */
  @Test
  public void testConfiguration() {
    ConfigurationCopy cc = new ConfigurationCopy();
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(1019));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(1000023));
    cc.set(Property.TSERV_DATACACHE_SIZE, Long.toString(1000027));
    cc.set(Property.TSERV_SUMMARYCACHE_SIZE, Long.toString(1000029));
    LruBlockCacheConfiguration.builder(Property.TSERV_PREFIX, CacheType.INDEX)
        .useEvictionThread(false).minFactor(0.93f).acceptableFactor(0.97f).singleFactor(0.20f)
        .multiFactor(0.30f).memoryFactor(0.50f).mapConcurrencyLevel(5).buildMap().forEach(cc::set);
    String defaultPrefix = BlockCacheConfiguration.getCachePropertyBase(Property.TSERV_PREFIX)
        + LruBlockCacheConfiguration.PROPERTY_PREFIX + ".default.";
    // this should be overridden by cache type specific setting
    cc.set(defaultPrefix + LruBlockCacheConfiguration.MEMORY_FACTOR_PROPERTY, "0.6");
    // this is not set for the cache type, so should fall back to default
    cc.set(defaultPrefix + LruBlockCacheConfiguration.MAP_LOAD_PROPERTY, "0.53");
    BlockCacheConfiguration bcc = BlockCacheConfiguration.forTabletServer(cc);
    LruBlockCacheConfiguration lbcc = new LruBlockCacheConfiguration(bcc, CacheType.INDEX);
    assertFalse(lbcc.isUseEvictionThread());
    assertEquals(0.93f, lbcc.getMinFactor(), 0.0000001);
    assertEquals(0.97f, lbcc.getAcceptableFactor(), 0.0000001);
    assertEquals(0.20f, lbcc.getSingleFactor(), 0.0000001);
    assertEquals(0.30f, lbcc.getMultiFactor(), 0.0000001);
    assertEquals(0.50f, lbcc.getMemoryFactor(), 0.0000001);
    assertEquals(0.53f, lbcc.getMapLoadFactor(), 0.0000001);
    assertEquals(5, lbcc.getMapConcurrencyLevel());
    assertEquals(1019, lbcc.getBlockSize());
    assertEquals(1000023, lbcc.getMaxSize());
  }

  /**
   * Overfills the cache with the eviction thread enabled (the default) and
   * waits for the background thread to run exactly one eviction pass.
   */
  @Test
  public void testBackgroundEvictionThread() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSizeDefault(maxSize, 9); // room for 9, will evict
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManager manager = BlockCacheManagerFactory.getInstance(cc);
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
    manager.start(BlockCacheConfiguration.forTabletServer(cc));
    LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
    Block[] blocks = generateFixedBlocks(10, blockSize, "block");
    // Add all the blocks
    for (Block block : blocks) {
      cache.cacheBlock(block.blockName, block.buf);
    }
    // Let the eviction run
    int n = 0;
    while (cache.getEvictionCount() == 0) {
      Thread.sleep(1000);
      // fail fast if the eviction thread has not run after one sleep cycle
      assertTrue(n++ < 1);
    }
    // A single eviction run should have occurred
    assertEquals(cache.getEvictionCount(), 1);
    manager.stop();
  }

  /**
   * Fills the cache below its limit and verifies heap-size accounting, that
   * every cached block is retrievable, and that no eviction occurs.
   */
  @Test
  public void testCacheSimple() throws Exception {
    long maxSize = 1000000;
    long blockSize = calculateBlockSizeDefault(maxSize, 101);
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManager manager = BlockCacheManagerFactory.getInstance(cc);
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
    manager.start(BlockCacheConfiguration.forTabletServer(cc));
    LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
    Block[] blocks = generateRandomBlocks(100, blockSize);
    long expectedCacheSize = cache.heapSize();
    // Confirm empty
    for (Block block : blocks) {
      assertNull(cache.getBlock(block.blockName));
    }
    // Add blocks
    for (Block block : blocks) {
      cache.cacheBlock(block.blockName, block.buf);
      expectedCacheSize += block.heapSize();
    }
    // Verify correctly calculated cache heap size
    assertEquals(expectedCacheSize, cache.heapSize());
    // Check if all blocks are properly cached and retrieved
    for (Block block : blocks) {
      CacheEntry ce = cache.getBlock(block.blockName);
      assertNotNull(ce);
      assertEquals(ce.getBuffer().length, block.buf.length);
    }
    // Verify correctly calculated cache heap size
    assertEquals(expectedCacheSize, cache.heapSize());
    // Check if all blocks are properly cached and retrieved
    for (Block block : blocks) {
      CacheEntry ce = cache.getBlock(block.blockName);
      assertNotNull(ce);
      assertEquals(ce.getBuffer().length, block.buf.length);
    }
    // Expect no evictions
    assertEquals(0, cache.getEvictionCount());
    // Thread t = new LruBlockCache.StatisticsThread(cache);
    // t.start();
    // t.join();
    manager.stop();
  }

  /**
   * Overfills the cache with the eviction thread disabled so eviction happens
   * synchronously, then verifies the two oldest blocks were evicted and the
   * cache size dropped back under the acceptable limit.
   */
  @Test
  public void testCacheEvictionSimple() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSizeDefault(maxSize, 10);
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManager manager = BlockCacheManagerFactory.getInstance(cc);
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
    LruBlockCacheConfiguration.builder(Property.TSERV_PREFIX, CacheType.INDEX)
        .useEvictionThread(false).buildMap().forEach(cc::set);
    manager.start(BlockCacheConfiguration.forTabletServer(cc));
    LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
    Block[] blocks = generateFixedBlocks(10, blockSize, "block");
    long expectedCacheSize = cache.heapSize();
    // Add all the blocks
    for (Block block : blocks) {
      cache.cacheBlock(block.blockName, block.buf);
      expectedCacheSize += block.heapSize();
    }
    // A single eviction run should have occurred
    assertEquals(1, cache.getEvictionCount());
    // Our expected size overruns acceptable limit
    assertTrue(
        expectedCacheSize > (maxSize * LruBlockCacheConfiguration.DEFAULT_ACCEPTABLE_FACTOR));
    // But the cache did not grow beyond max
    assertTrue(cache.heapSize() < maxSize);
    // And is still below the acceptable limit
    assertTrue(cache.heapSize() < (maxSize * LruBlockCacheConfiguration.DEFAULT_ACCEPTABLE_FACTOR));
    // All blocks except block 0 and 1 should be in the cache
    assertNull(cache.getBlock(blocks[0].blockName));
    assertNull(cache.getBlock(blocks[1].blockName));
    for (int i = 2; i < blocks.length; i++) {
      assertArrayEquals(cache.getBlock(blocks[i].blockName).getBuffer(), blocks[i].buf);
    }
    manager.stop();
  }

  /**
   * Verifies eviction fairness between the "single access" and "multi access"
   * priorities: one block is evicted from each bucket rather than all
   * evictions coming from one priority.
   */
  @Test
  public void testCacheEvictionTwoPriorities() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSizeDefault(maxSize, 10);
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManager manager = BlockCacheManagerFactory.getInstance(cc);
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
    LruBlockCacheConfiguration.builder(Property.TSERV_PREFIX, CacheType.INDEX)
        .useEvictionThread(false).minFactor(0.98f).acceptableFactor(0.99f).singleFactor(0.25f)
        .multiFactor(0.50f).memoryFactor(0.25f).buildMap().forEach(cc::set);
    manager.start(BlockCacheConfiguration.forTabletServer(cc));
    LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
    Block[] singleBlocks = generateFixedBlocks(5, 10000, "single");
    Block[] multiBlocks = generateFixedBlocks(5, 10000, "multi");
    long expectedCacheSize = cache.heapSize();
    // Add and get the multi blocks
    for (Block block : multiBlocks) {
      cache.cacheBlock(block.blockName, block.buf);
      expectedCacheSize += block.heapSize();
      assertArrayEquals(cache.getBlock(block.blockName).getBuffer(), block.buf);
    }
    // Add the single blocks (no get)
    for (Block block : singleBlocks) {
      cache.cacheBlock(block.blockName, block.buf);
      expectedCacheSize += block.heapSize();
    }
    // A single eviction run should have occurred
    assertEquals(cache.getEvictionCount(), 1);
    // We expect two entries evicted
    assertEquals(cache.getEvictedCount(), 2);
    // Our expected size overruns acceptable limit
    assertTrue(
        expectedCacheSize > (maxSize * LruBlockCacheConfiguration.DEFAULT_ACCEPTABLE_FACTOR));
    // But the cache did not grow beyond max
    assertTrue(cache.heapSize() <= maxSize);
    // And is now below the acceptable limit
    assertTrue(
        cache.heapSize() <= (maxSize * LruBlockCacheConfiguration.DEFAULT_ACCEPTABLE_FACTOR));
    // We expect fairness across the two priorities.
    // This test makes multi go barely over its limit, in-memory
    // empty, and the rest in single. Two single evictions and
    // one multi eviction expected.
    assertNull(cache.getBlock(singleBlocks[0].blockName));
    assertNull(cache.getBlock(multiBlocks[0].blockName));
    // And all others to be cached
    for (int i = 1; i < 4; i++) {
      assertArrayEquals(cache.getBlock(singleBlocks[i].blockName).getBuffer(), singleBlocks[i].buf);
      assertArrayEquals(cache.getBlock(multiBlocks[i].blockName).getBuffer(), multiBlocks[i].buf);
    }
    manager.stop();
  }

  /**
   * Exercises all three priorities (single, multi, in-memory): each insertion
   * past capacity should evict the oldest block of the over-budget priority,
   * and oversized blocks should trigger proportionally more evictions.
   */
  @Test
  public void testCacheEvictionThreePriorities() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManager manager = BlockCacheManagerFactory.getInstance(cc);
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
    LruBlockCacheConfiguration.builder(Property.TSERV_PREFIX, CacheType.INDEX)
        .useEvictionThread(false).minFactor(0.98f).acceptableFactor(0.99f).singleFactor(0.33f)
        .multiFactor(0.33f).memoryFactor(0.34f).buildMap().forEach(cc::set);
    manager.start(BlockCacheConfiguration.forTabletServer(cc));
    LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
    Block[] singleBlocks = generateFixedBlocks(5, blockSize, "single");
    Block[] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
    Block[] memoryBlocks = generateFixedBlocks(5, blockSize, "memory");
    long expectedCacheSize = cache.heapSize();
    // Add 3 blocks from each priority
    for (int i = 0; i < 3; i++) {
      // Just add single blocks
      cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
      expectedCacheSize += singleBlocks[i].heapSize();
      // Add and get multi blocks
      cache.cacheBlock(multiBlocks[i].blockName, multiBlocks[i].buf);
      expectedCacheSize += multiBlocks[i].heapSize();
      cache.getBlock(multiBlocks[i].blockName);
      // Add memory blocks as such
      cache.cacheBlock(memoryBlocks[i].blockName, memoryBlocks[i].buf, true);
      expectedCacheSize += memoryBlocks[i].heapSize();
    }
    // Do not expect any evictions yet
    assertEquals(0, cache.getEvictionCount());
    // Verify cache size
    assertEquals(expectedCacheSize, cache.heapSize());
    // Insert a single block, oldest single should be evicted
    cache.cacheBlock(singleBlocks[3].blockName, singleBlocks[3].buf);
    // Single eviction, one thing evicted
    assertEquals(1, cache.getEvictionCount());
    assertEquals(1, cache.getEvictedCount());
    // Verify oldest single block is the one evicted
    assertNull(cache.getBlock(singleBlocks[0].blockName));
    // Change the oldest remaining single block to a multi
    cache.getBlock(singleBlocks[1].blockName);
    // Insert another single block
    cache.cacheBlock(singleBlocks[4].blockName, singleBlocks[4].buf);
    // Two evictions, two evicted.
    assertEquals(2, cache.getEvictionCount());
    assertEquals(2, cache.getEvictedCount());
    // Oldest multi block should be evicted now
    assertNull(cache.getBlock(multiBlocks[0].blockName));
    // Insert another memory block
    cache.cacheBlock(memoryBlocks[3].blockName, memoryBlocks[3].buf, true);
    // Three evictions, three evicted.
    assertEquals(3, cache.getEvictionCount());
    assertEquals(3, cache.getEvictedCount());
    // Oldest memory block should be evicted now
    assertNull(cache.getBlock(memoryBlocks[0].blockName));
    // Add a block that is twice as big (should force two evictions)
    Block[] bigBlocks = generateFixedBlocks(3, blockSize * 3, "big");
    cache.cacheBlock(bigBlocks[0].blockName, bigBlocks[0].buf);
    // Four evictions, six evicted (inserted block 3X size, expect +3 evicted)
    assertEquals(4, cache.getEvictionCount());
    assertEquals(6, cache.getEvictedCount());
    // Expect three remaining singles to be evicted
    assertNull(cache.getBlock(singleBlocks[2].blockName));
    assertNull(cache.getBlock(singleBlocks[3].blockName));
    assertNull(cache.getBlock(singleBlocks[4].blockName));
    // Make the big block a multi block
    cache.getBlock(bigBlocks[0].blockName);
    // Cache another single big block
    cache.cacheBlock(bigBlocks[1].blockName, bigBlocks[1].buf);
    // Five evictions, nine evicted (3 new)
    assertEquals(5, cache.getEvictionCount());
    assertEquals(9, cache.getEvictedCount());
    // Expect three remaining multis to be evicted
    assertNull(cache.getBlock(singleBlocks[1].blockName));
    assertNull(cache.getBlock(multiBlocks[1].blockName));
    assertNull(cache.getBlock(multiBlocks[2].blockName));
    // Cache a big memory block
    cache.cacheBlock(bigBlocks[2].blockName, bigBlocks[2].buf, true);
    // Six evictions, twelve evicted (3 new)
    assertEquals(6, cache.getEvictionCount());
    assertEquals(12, cache.getEvictedCount());
    // Expect three remaining in-memory to be evicted
    assertNull(cache.getBlock(memoryBlocks[1].blockName));
    assertNull(cache.getBlock(memoryBlocks[2].blockName));
    assertNull(cache.getBlock(memoryBlocks[3].blockName));
    manager.stop();
  }

  // test scan resistance
  @Test
  public void testScanResistance() throws Exception {
    long maxSize = 100000;
    long blockSize = calculateBlockSize(maxSize, 10);
    DefaultConfiguration dc = DefaultConfiguration.getInstance();
    ConfigurationCopy cc = new ConfigurationCopy(dc);
    cc.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
    BlockCacheManager manager = BlockCacheManagerFactory.getInstance(cc);
    cc.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
    LruBlockCacheConfiguration.builder(Property.TSERV_PREFIX, CacheType.INDEX)
        .useEvictionThread(false).minFactor(0.66f).acceptableFactor(0.99f).singleFactor(0.33f)
        .multiFactor(0.33f).memoryFactor(0.34f).buildMap().forEach(cc::set);
    manager.start(BlockCacheConfiguration.forTabletServer(cc));
    LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
    Block[] singleBlocks = generateFixedBlocks(20, blockSize, "single");
    Block[] multiBlocks = generateFixedBlocks(5, blockSize, "multi");
    // Add 5 multi blocks
    for (Block block : multiBlocks) {
      cache.cacheBlock(block.blockName, block.buf);
      cache.getBlock(block.blockName);
    }
    // Add 5 single blocks
    for (int i = 0; i < 5; i++) {
      cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
    }
    // An eviction ran
    assertEquals(1, cache.getEvictionCount());
    // To drop down to 2/3 capacity, we'll need to evict 4 blocks
    assertEquals(4, cache.getEvictedCount());
    // Should have been taken off equally from single and multi
    assertNull(cache.getBlock(singleBlocks[0].blockName));
    assertNull(cache.getBlock(singleBlocks[1].blockName));
    assertNull(cache.getBlock(multiBlocks[0].blockName));
    assertNull(cache.getBlock(multiBlocks[1].blockName));
    // Let's keep "scanning" by adding single blocks. From here on we only
    // expect evictions from the single bucket.
    // Every time we reach 10 total blocks (every 4 inserts) we get 4 single
    // blocks evicted. Inserting 13 blocks should yield 3 more evictions and
    // 12 more evicted.
    for (int i = 5; i < 18; i++) {
      cache.cacheBlock(singleBlocks[i].blockName, singleBlocks[i].buf);
    }
    // 4 total evictions, 16 total evicted
    assertEquals(4, cache.getEvictionCount());
    assertEquals(16, cache.getEvictedCount());
    // Should now have 7 total blocks
    assertEquals(7, cache.size());
    manager.stop();
  }

  /** Creates {@code numBlocks} blocks of identical {@code size} named {@code pfx0..pfxN-1}. */
  private Block[] generateFixedBlocks(int numBlocks, int size, String pfx) {
    Block[] blocks = new Block[numBlocks];
    for (int i = 0; i < numBlocks; i++) {
      blocks[i] = new Block(pfx + i, size);
    }
    return blocks;
  }

  /** Convenience overload that narrows {@code size} to an int. */
  private Block[] generateFixedBlocks(int numBlocks, long size, String pfx) {
    return generateFixedBlocks(numBlocks, (int) size, pfx);
  }

  /** Creates {@code numBlocks} blocks with random sizes in [1, maxSize]. */
  private Block[] generateRandomBlocks(int numBlocks, long maxSize) {
    Block[] blocks = new Block[numBlocks];
    for (int i = 0; i < numBlocks; i++) {
      blocks[i] = new Block("block" + i, RANDOM.get().nextInt((int) maxSize) + 1);
    }
    return blocks;
  }

  /**
   * Computes a per-block payload size such that {@code numBlocks} blocks (plus
   * the cache's fixed and per-entry overhead) fill roughly 99% of
   * {@code maxSize}.
   */
  private long calculateBlockSize(long maxSize, int numBlocks) {
    long roughBlockSize = maxSize / numBlocks;
    int numEntries = (int) Math.ceil((1.2) * maxSize / roughBlockSize);
    long totalOverhead = LruBlockCache.CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP
        + ((long) numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY)
        + ((long) LruBlockCacheConfiguration.DEFAULT_CONCURRENCY_LEVEL
            * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
    long negateBlockSize = totalOverhead / numEntries;
    negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
    return ClassSize.align((long) Math.floor((roughBlockSize - negateBlockSize) * 0.99f));
  }

  /**
   * Same as {@link #calculateBlockSize(long, int)} but sizes blocks against the
   * default acceptable factor instead of a flat 99%.
   */
  private long calculateBlockSizeDefault(long maxSize, int numBlocks) {
    long roughBlockSize = maxSize / numBlocks;
    int numEntries = (int) Math.ceil((1.2) * maxSize / roughBlockSize);
    long totalOverhead = LruBlockCache.CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP
        + ((long) numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY)
        + ((long) LruBlockCacheConfiguration.DEFAULT_CONCURRENCY_LEVEL
            * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
    long negateBlockSize = totalOverhead / numEntries;
    negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
    return ClassSize.align((long) Math.floor(
        (roughBlockSize - negateBlockSize) * LruBlockCacheConfiguration.DEFAULT_ACCEPTABLE_FACTOR));
  }

  /** A named byte buffer whose heap size mirrors the cache's own accounting. */
  private static class Block implements HeapSize {
    String blockName;
    byte[] buf;

    Block(String blockName, int size) {
      this.blockName = blockName;
      this.buf = new byte[size];
    }

    @Override
    public long heapSize() {
      return CachedBlock.PER_BLOCK_OVERHEAD + ClassSize.align(blockName.length())
          + ClassSize.align(buf.length);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.file.blockfile.cache;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlockQueue;
import org.junit.jupiter.api.Test;
public class TestCachedBlockQueue {

  /** Heap sizes used to construct the ten standard test blocks cb1..cb10. */
  private static final long[] TEN_BLOCK_SIZES =
      {1000, 1500, 1000, 1500, 1000, 1750, 1000, 1500, 1000, 1500};

  /**
   * Builds blocks named cb1..cb10 with the sizes above and access times 1..10, recording each
   * block's size into {@code sum}. Extracted because testQueue and testQueueSmallBlockEdgeCase
   * previously duplicated this ten-block setup inline.
   */
  private static CachedBlock[] makeTenBlocks(AtomicLong sum) {
    CachedBlock[] blocks = new CachedBlock[TEN_BLOCK_SIZES.length];
    for (int i = 0; i < blocks.length; i++) {
      blocks[i] = new CachedBlock(TEN_BLOCK_SIZES[i], "cb" + (i + 1), i + 1);
      blocks[i].recordSize(sum);
    }
    return blocks;
  }

  /** Returns the sum of {@code heapSize()} over the first {@code count} blocks. */
  private static long totalHeapSize(CachedBlock[] blocks, int count) {
    long total = 0;
    for (int i = 0; i < count; i++) {
      total += blocks[i].heapSize();
    }
    return total;
  }

  /** A single block larger than the queue's max size should still be admitted. */
  @Test
  public void testLargeBlock() {
    CachedBlockQueue queue = new CachedBlockQueue(10000L, 1000L);
    CachedBlock cb1 = new CachedBlock(10001L, "cb1", 1L);
    queue.add(cb1);
    List<org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock> blocks = getList(queue);
    assertEquals("cb1", Objects.requireNonNull(blocks.get(0)).getName());
  }

  /**
   * Three 5000-byte blocks against a 10000-byte max: the two oldest (cb1, cb2) are kept and the
   * newest (cb3) is not retained.
   */
  @Test
  public void testAddNewerBlock() {
    CachedBlockQueue queue = new CachedBlockQueue(10000L, 1000L);
    AtomicLong sum = new AtomicLong();
    CachedBlock cb1 = new CachedBlock(5000L, "cb1", 1L);
    cb1.recordSize(sum);
    CachedBlock cb2 = new CachedBlock(5000, "cb2", 2L);
    cb2.recordSize(sum);
    CachedBlock cb3 = new CachedBlock(5000, "cb3", 3L);
    cb3.recordSize(sum);
    queue.add(cb1);
    queue.add(cb2);
    queue.add(cb3);
    List<org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock> blocks = getList(queue);
    assertEquals(2, blocks.size());
    long expectedSize = cb1.heapSize() + cb2.heapSize();
    assertEquals(expectedSize, queue.heapSize());
    assertEquals(expectedSize, sum.get() - cb3.heapSize());
    assertEquals(List.of("cb1", "cb2"),
        blocks.stream().map(cb -> cb.getName()).collect(Collectors.toList()));
  }

  @Test
  public void testQueue() {
    AtomicLong sum = new AtomicLong();
    CachedBlock[] cbs = makeTenBlocks(sum);
    CachedBlockQueue queue = new CachedBlockQueue(10000, 1000);
    for (CachedBlock cb : cbs) {
      queue.add(cb);
    }
    // We expect cb1 through cb8 to be in the queue
    long expectedSize = totalHeapSize(cbs, 8);
    assertEquals(expectedSize, queue.heapSize());
    assertEquals(expectedSize, sum.get() - cbs[8].heapSize() - cbs[9].heapSize());
    List<org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock> blocks = getList(queue);
    assertEquals(List.of("cb1", "cb2", "cb3", "cb4", "cb5", "cb6", "cb7", "cb8"),
        blocks.stream().map(cb -> cb.getName()).collect(Collectors.toList()));
  }

  @Test
  public void testQueueSmallBlockEdgeCase() {
    AtomicLong sum = new AtomicLong();
    CachedBlock[] cbs = makeTenBlocks(sum);
    // validate that sum was not improperly added to heapSize in recordSize method.
    assertEquals(cbs[2].heapSize(), cbs[6].heapSize());
    CachedBlockQueue queue = new CachedBlockQueue(10000, 1000);
    for (CachedBlock cb : cbs) {
      queue.add(cb);
    }
    CachedBlock cb0 = new CachedBlock(10 + CachedBlock.PER_BLOCK_OVERHEAD, "cb0", 0);
    queue.add(cb0);
    // This is older, so we must include it, but it will not end up kicking
    // anything out because (heapSize - cb8.heapSize + cb0.heapSize < maxSize)
    // and we must always maintain heapSize >= maxSize once we achieve it.
    // We expect cb0 through cb8 to be in the queue
    long expectedSize = totalHeapSize(cbs, 8) + cb0.heapSize();
    assertEquals(expectedSize, queue.heapSize());
    assertEquals(expectedSize, sum.get() - cbs[8].heapSize() - cbs[9].heapSize());
    List<org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock> blocks = getList(queue);
    assertEquals(List.of("cb0", "cb1", "cb2", "cb3", "cb4", "cb5", "cb6", "cb7", "cb8"),
        blocks.stream().map(cb -> cb.getName()).collect(Collectors.toList()));
  }

  /**
   * Test subclass whose payload byte[] is sized so that the constructor argument controls the
   * block's overall footprint (payload length = heapSize - PER_BLOCK_OVERHEAD).
   */
  private static class CachedBlock
      extends org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock {
    public CachedBlock(long heapSize, String name, long accessTime) {
      super(name, new byte[(int) (heapSize - CachedBlock.PER_BLOCK_OVERHEAD)], accessTime, false);
    }
  }

  /**
   * Get a sorted List of all elements in this queue, in descending order.
   *
   * @return list of cached elements in descending order
   */
  private List<org.apache.accumulo.core.file.blockfile.cache.lru.CachedBlock>
      getList(final CachedBlockQueue queue) {
    return List.of(queue.get());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.EnumSet;
import org.junit.jupiter.api.Test;
public class NamespacePermissionsTest {

  /**
   * Verifies that every namespace permission is reachable as the equivalent of at least one
   * table or system permission; any permission left over has no equivalent and fails the test.
   */
  @Test
  public void testEnsureEquivalencies() {
    EnumSet<NamespacePermission> unmatched = EnumSet.allOf(NamespacePermission.class);
    // Strike out every namespace permission that a table permission maps to.
    for (TablePermission tablePerm : TablePermission.values()) {
      unmatched.remove(NamespacePermission.getEquivalent(tablePerm));
    }
    // Strike out every namespace permission that a system permission maps to.
    for (SystemPermission systemPerm : SystemPermission.values()) {
      unmatched.remove(NamespacePermission.getEquivalent(systemPerm));
    }
    assertTrue(unmatched.isEmpty(),
        "All namespace permissions should have equivalent table or system permissions.");
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.stream.IntStream;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
import org.apache.accumulo.core.client.security.tokens.NullToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.junit.jupiter.api.Test;
public class AuthenticationTokenTest {

  /** Returns true when every byte of {@code bytes} is zero. */
  private static boolean allZeros(byte[] bytes) {
    for (byte b : bytes) {
      if (b != 0) {
        return false;
      }
    }
    return true;
  }

  /**
   * Round-trips a PasswordToken and a NullToken through the AuthenticationTokenSerializer and
   * verifies the deserialized tokens match the originals.
   */
  @Test
  public void testSerializeDeserializeToken() {
    byte[] randomBytes = new byte[12];
    do {
      // random fill, but avoid all zeros case
      RANDOM.get().nextBytes(randomBytes);
    } while (allZeros(randomBytes));

    byte[] serialized = AuthenticationTokenSerializer.serialize(new PasswordToken(randomBytes));
    PasswordToken passwordToken =
        AuthenticationTokenSerializer.deserialize(PasswordToken.class, serialized);
    assertArrayEquals(randomBytes, passwordToken.getPassword());

    serialized = AuthenticationTokenSerializer.serialize(new NullToken());
    AuthenticationToken nullToken =
        AuthenticationTokenSerializer.deserialize(NullToken.class, serialized);
    assertEquals(new NullToken(), nullToken);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.security.ColumnVisibility.quote;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Comparator;
import org.apache.accumulo.core.security.ColumnVisibility.Node;
import org.apache.accumulo.core.security.ColumnVisibility.NodeComparator;
import org.apache.accumulo.core.security.ColumnVisibility.NodeType;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link ColumnVisibility}: expression parsing and validation, flatten/normalization,
 * quoting of special characters, and parse-tree structure.
 */
public class ColumnVisibilityTest {

  // Asserts that each given expression is rejected with IllegalArgumentException.
  private void shouldThrow(String... strings) {
    for (String s : strings) {
      final byte[] sBytes = s.getBytes();
      assertThrows(IllegalArgumentException.class, () -> new ColumnVisibility(sBytes),
          "Should throw: " + s);
    }
  }

  // Asserts that each given expression parses without throwing.
  private void shouldNotThrow(String... strings) {
    for (String s : strings) {
      new ColumnVisibility(s.getBytes());
    }
  }

  @Test
  public void testEmpty() {
    // empty visibility is valid, regardless of which constructor form is used
    ColumnVisibility a = new ColumnVisibility();
    ColumnVisibility b = new ColumnVisibility(new byte[0]);
    ColumnVisibility c = new ColumnVisibility("");
    ColumnVisibility d = new ColumnVisibility(new Text());
    assertEquals(a, b);
    assertEquals(a, c);
    assertEquals(a, d);
  }

  @Test
  public void testEmptyFlatten() {
    // empty visibility is valid and can be flattened without error
    new ColumnVisibility().flatten();
    new ColumnVisibility("").flatten();
  }

  @Test
  public void testSimple() {
    shouldNotThrow("test", "(one)");
  }

  @Test
  public void testCompound() {
    shouldNotThrow("a|b", "a&b", "ab&bc");
    shouldNotThrow("A&B&C&D&E", "A|B|C|D|E", "(A|B|C)", "(A)|B|(C)", "A&(B)&(C)", "A&B&(L)");
    // underscore, dash, and colon are legal unquoted term characters
    shouldNotThrow("_&-&:");
  }

  @Test
  public void testBadCharacters() {
    shouldThrow("=", "*", "^", "%", "@");
    shouldThrow("a*b");
  }

  // Helper, not a test: values come in pairs; checks that the expression at each even index
  // flattens to exactly the bytes of the expression at the following odd index.
  // NOTE(review): unlike the other helpers this is public; could be private like shouldThrow.
  public void normalized(String... values) {
    for (int i = 0; i < values.length; i += 2) {
      ColumnVisibility cv = new ColumnVisibility(values[i].getBytes());
      assertArrayEquals(cv.flatten(), values[i + 1].getBytes());
    }
  }

  @Test
  public void testComplexCompound() {
    shouldNotThrow("(a|b)&(x|y)");
    shouldNotThrow("a&(x|y)", "(a|b)&(x|y)", "A&(L|M)", "B&(L|M)", "A&B&(L|M)");
    shouldNotThrow("A&FOO&(L|M)", "(A|B)&FOO&(L|M)", "A&B&(L|M|FOO)", "((A|B|C)|foo)&bar");
    shouldNotThrow("(one&two)|(foo&bar)", "(one|foo)&three", "one|foo|bar", "(one|foo)|bar",
        "((one|foo)|bar)&two");
  }

  @Test
  public void testNormalization() {
    // flatten sorts operands and strips redundant parentheses
    normalized("a", "a", "(a)", "a", "b|a", "a|b", "(b)|a", "a|b", "(b|(a|c))&x", "x&(a|b|c)",
        "(((a)))", "a");
    final String normForm = "a&b&c";
    normalized("b&c&a", normForm, "c&b&a", normForm, "a&(b&c)", normForm, "(a&c)&b", normForm);
    // this is an expression that's basically `expr | expr`; duplicates collapse to one
    normalized("(d&c&b&a)|(b&c&a&d)", "a&b&c&d");
  }

  @Test
  public void testDanglingOperators() {
    shouldThrow("a|b&");
    shouldThrow("(|a)");
    shouldThrow("|");
    shouldThrow("a|", "|a", "|", "&");
    shouldThrow("&(five)", "|(five)", "(five)&", "five|", "a|(b)&", "(&five)", "(five|)");
  }

  @Test
  public void testMissingSeparators() {
    shouldThrow("one(five)", "(five)one", "(one)(two)", "a|(b(c))");
  }

  @Test
  public void testMismatchedParentheses() {
    shouldThrow("(", ")", "(a&b", "b|a)", "A|B)");
  }

  @Test
  public void testMixedOperators() {
    // mixing & and | at the same nesting level without parentheses is ambiguous, so rejected
    shouldThrow("(A&B)|(C&D)&(E)");
    shouldThrow("a|b&c", "A&B&C|D", "(A&B)|(C&D)&(E)");
  }

  @Test
  public void testQuotes() {
    // malformed quoted terms: empty quotes, missing separators, unterminated or bad escapes
    shouldThrow("\"\"");
    shouldThrow("\"A\"A");
    shouldThrow("\"A\"\"B\"");
    shouldThrow("(A)\"B\"");
    shouldThrow("\"A\"(B)");
    shouldThrow("\"A");
    shouldThrow("\"");
    shouldThrow("\"B");
    shouldThrow("A&\"B");
    shouldThrow("A&\"B\\'");
    shouldThrow("A&\"B\\");
    // well-formed quoted terms, including escaped backslash and escaped quote
    shouldNotThrow("\"A\"");
    shouldNotThrow("(\"A\")");
    shouldNotThrow("A&\"B.D\"");
    shouldNotThrow("A&\"B\\\\D\"");
    shouldNotThrow("A&\"B\\\"D\"");
  }

  @Test
  public void testToString() {
    ColumnVisibility cv = new ColumnVisibility(quote("a"));
    assertEquals("[a]", cv.toString());
    // multi-byte
    cv = new ColumnVisibility(quote("五"));
    assertEquals("[\"五\"]", cv.toString());
  }

  @Test
  public void testParseTree() {
    Node node = parse("(W)|(U&V)");
    assertNode(node, NodeType.OR, 0, 9);
    assertNode(node.getChildren().get(0), NodeType.TERM, 1, 2);
    assertNode(node.getChildren().get(1), NodeType.AND, 5, 8);
  }

  @Test
  public void testParseTreeWithNoChildren() {
    Node node = parse("ABC");
    assertNode(node, NodeType.TERM, 0, 3);
  }

  @Test
  public void testParseTreeWithTwoChildren() {
    Node node = parse("ABC|DEF");
    assertNode(node, NodeType.OR, 0, 7);
    assertNode(node.getChildren().get(0), NodeType.TERM, 0, 3);
    assertNode(node.getChildren().get(1), NodeType.TERM, 4, 7);
  }

  @Test
  public void testParseTreeWithParenthesesAndTwoChildren() {
    Node node = parse("(ABC|DEF)");
    assertNode(node, NodeType.OR, 1, 8);
    assertNode(node.getChildren().get(0), NodeType.TERM, 1, 4);
    assertNode(node.getChildren().get(1), NodeType.TERM, 5, 8);
  }

  @Test
  public void testParseTreeWithParenthesizedChildren() {
    Node node = parse("ABC|(DEF&GHI)");
    assertNode(node, NodeType.OR, 0, 13);
    assertNode(node.getChildren().get(0), NodeType.TERM, 0, 3);
    assertNode(node.getChildren().get(1), NodeType.AND, 5, 12);
    assertNode(node.getChildren().get(1).children.get(0), NodeType.TERM, 5, 8);
    assertNode(node.getChildren().get(1).children.get(1), NodeType.TERM, 9, 12);
  }

  @Test
  public void testParseTreeWithMoreParentheses() {
    Node node = parse("(W)|(U&V)");
    assertNode(node, NodeType.OR, 0, 9);
    assertNode(node.getChildren().get(0), NodeType.TERM, 1, 2);
    assertNode(node.getChildren().get(1), NodeType.AND, 5, 8);
    assertNode(node.getChildren().get(1).children.get(0), NodeType.TERM, 5, 6);
    assertNode(node.getChildren().get(1).children.get(1), NodeType.TERM, 7, 8);
  }

  @Test
  public void testEmptyParseTreesAreEqual() {
    Comparator<Node> comparator = new NodeComparator(new byte[] {});
    Node empty = new ColumnVisibility().getParseTree();
    assertEquals(0, comparator.compare(empty, parse("")));
  }

  @Test
  public void testParseTreesOrdering() {
    byte[] expression = "(b&c&d)|((a|m)&y&z)|(e&f)".getBytes(UTF_8);
    byte[] flattened = new ColumnVisibility(expression).flatten();
    // Convert to String for indexOf convenience
    String flat = new String(flattened, UTF_8);
    assertTrue(flat.indexOf('e') < flat.indexOf('|'), "shortest expressions sort first");
    assertTrue(flat.indexOf('b') < flat.indexOf('a'), "shortest children sort first");
  }

  // Parses an expression and returns the root of its parse tree.
  private Node parse(String s) {
    ColumnVisibility v = new ColumnVisibility(s);
    return v.getParseTree();
  }

  // Asserts a node's type and its [start, end) byte offsets within the expression.
  private void assertNode(Node node, NodeType nodeType, int start, int end) {
    assertEquals(node.type, nodeType);
    assertEquals(start, node.start);
    assertEquals(end, node.end);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import javax.security.auth.DestroyFailedException;
import org.apache.accumulo.core.WithTestNames;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.security.SecurityErrorCode;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
import org.apache.accumulo.core.client.security.tokens.NullToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.clientImpl.Credentials;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.securityImpl.thrift.TCredentials;
import org.junit.jupiter.api.Test;
public class CredentialsTest extends WithTestNames {
@Test
public void testToThrift() throws DestroyFailedException {
var instanceID = InstanceId.of(testName());
// verify thrift serialization
Credentials creds = new Credentials("test", new PasswordToken("testing"));
TCredentials tCreds = creds.toThrift(instanceID);
assertEquals("test", tCreds.getPrincipal());
assertEquals(PasswordToken.class.getName(), tCreds.getTokenClassName());
assertArrayEquals(AuthenticationTokenSerializer.serialize(new PasswordToken("testing")),
tCreds.getToken());
// verify that we can't serialize if it's destroyed
creds.getToken().destroy();
Exception e = assertThrows(RuntimeException.class, () -> creds.toThrift(instanceID));
assertSame(AccumuloSecurityException.class, e.getCause().getClass());
assertEquals(AccumuloSecurityException.class.cast(e.getCause()).getSecurityErrorCode(),
SecurityErrorCode.TOKEN_EXPIRED);
}
@Test
public void roundtripThrift() {
var instanceID = InstanceId.of(testName());
Credentials creds = new Credentials("test", new PasswordToken("testing"));
TCredentials tCreds = creds.toThrift(instanceID);
Credentials roundtrip = Credentials.fromThrift(tCreds);
assertEquals(creds, roundtrip, "Round-trip through thrift changed credentials equality");
}
@Test
public void testEqualsAndHashCode() {
Credentials nullNullCreds = new Credentials(null, null);
Credentials abcNullCreds = new Credentials("abc", new NullToken());
Credentials cbaNullCreds = new Credentials("cba", new NullToken());
Credentials abcBlahCreds = new Credentials("abc", new PasswordToken("blah"));
// check hash codes
assertEquals(0, nullNullCreds.hashCode());
assertEquals("abc".hashCode(), abcNullCreds.hashCode());
assertEquals(abcNullCreds.hashCode(), abcBlahCreds.hashCode());
assertNotEquals(abcNullCreds.hashCode(), cbaNullCreds.hashCode());
// identity
assertEquals(abcNullCreds, abcNullCreds);
assertEquals(new Credentials("abc", new NullToken()), abcNullCreds);
// equal, but different token constructors
assertEquals(new Credentials("abc", new PasswordToken("abc".getBytes(UTF_8))),
new Credentials("abc", new PasswordToken("abc")));
// test not equals
assertNotEquals(nullNullCreds, abcBlahCreds);
assertNotEquals(nullNullCreds, abcNullCreds);
assertNotEquals(abcNullCreds, abcBlahCreds);
}
@Test
public void testCredentialsSerialization() {
Credentials creds = new Credentials("a:b-c", new PasswordToken("d-e-f".getBytes(UTF_8)));
String serialized = creds.serialize();
Credentials result = Credentials.deserialize(serialized);
assertEquals(creds, result);
assertEquals("a:b-c", result.getPrincipal());
assertEquals(new PasswordToken("d-e-f"), result.getToken());
Credentials nullNullCreds = new Credentials(null, null);
serialized = nullNullCreds.serialize();
result = Credentials.deserialize(serialized);
assertNull(result.getPrincipal());
assertNull(result.getToken());
}
@Test
public void testToString() {
Credentials creds = new Credentials(null, null);
assertEquals(Credentials.class.getName() + ":null:null:<hidden>", creds.toString());
creds = new Credentials("", new NullToken());
assertEquals(Credentials.class.getName() + "::" + NullToken.class.getName() + ":<hidden>",
creds.toString());
creds = new Credentials("abc", null);
assertEquals(Credentials.class.getName() + ":abc:null:<hidden>", creds.toString());
creds = new Credentials("abc", new PasswordToken(""));
assertEquals(
Credentials.class.getName() + ":abc:" + PasswordToken.class.getName() + ":<hidden>",
creds.toString());
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static org.apache.accumulo.core.clientImpl.AuthenticationTokenIdentifier.createTAuthIdentifier;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.accumulo.core.clientImpl.AuthenticationTokenIdentifier;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.securityImpl.thrift.TAuthenticationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.jupiter.api.Test;
public class AuthenticationTokenIdentifierTest {

  /** The UGI derived from the identifier should match one created directly from the principal. */
  @Test
  public void testUgi() {
    String principal = "user";
    var token = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    UserGroupInformation actual = token.getUser();
    UserGroupInformation expected = UserGroupInformation.createRemoteUser(principal);
    assertEquals(expected.getAuthenticationMethod(), actual.getAuthenticationMethod());
    // fix: previously compared expected.getUserName() with itself, so it could never fail
    assertEquals(expected.getUserName(), actual.getUserName());
  }

  /** Identifiers built from the same principal are equal and hash the same. */
  @Test
  public void testEquality() {
    String principal = "user";
    var token = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    assertEquals(token, token);
    var newToken = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    assertEquals(token, newToken);
    assertEquals(token.hashCode(), newToken.hashCode());
  }

  /** Extended fields (keyId, issue/expiration dates, instance id) participate in equality. */
  @Test
  public void testExtendedEquality() {
    String principal = "user";
    var token = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    assertEquals(token, token);
    var newToken =
        new AuthenticationTokenIdentifier(createTAuthIdentifier(principal, 1, 5L, 10L, "uuid"));
    assertNotEquals(token, newToken);
    assertNotEquals(token.hashCode(), newToken.hashCode());
    var dblNewToken =
        new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    dblNewToken.setKeyId(1);
    dblNewToken.setIssueDate(5L);
    dblNewToken.setExpirationDate(10L);
    dblNewToken.setInstanceId(InstanceId.of("uuid"));
    // fix: dblNewToken was built and configured but never asserted; setting the same fields
    // via setters should yield an identifier equal to the one built via createTAuthIdentifier
    assertEquals(newToken, dblNewToken);
    assertEquals(newToken.hashCode(), dblNewToken.hashCode());
  }

  /** toString should expose the principal for diagnostics. */
  @Test
  public void testToString() {
    String principal = "my_special_principal";
    var token = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    assertTrue(token.toString().contains(principal));
  }

  /** Round-trips an identifier through Writable write/readFields. */
  @Test
  public void testSerialization() throws IOException {
    String principal = "my_special_principal";
    var token = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    token.write(out);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    AuthenticationTokenIdentifier deserializedToken = new AuthenticationTokenIdentifier();
    deserializedToken.readFields(in);
    assertEquals(token, deserializedToken);
    assertEquals(token.hashCode(), deserializedToken.hashCode());
    assertEquals(token.toString(), deserializedToken.toString());
  }

  /** The token kind constant identifies this token type to the Hadoop security layer. */
  @Test
  public void testTokenKind() {
    String principal = "my_special_principal";
    var token = new AuthenticationTokenIdentifier(new TAuthenticationTokenIdentifier(principal));
    assertEquals(AuthenticationTokenIdentifier.TOKEN_KIND, token.getKind());
  }

  /** An identifier with no underlying thrift message still round-trips cleanly. */
  @Test
  public void testNullMsg() throws IOException {
    AuthenticationTokenIdentifier token = new AuthenticationTokenIdentifier();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    token.write(out);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    AuthenticationTokenIdentifier deserializedToken = new AuthenticationTokenIdentifier();
    deserializedToken.readFields(in);
    assertEquals(token, deserializedToken);
    assertEquals(token.hashCode(), deserializedToken.hashCode());
    assertEquals(token.toString(), deserializedToken.toString());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static org.apache.accumulo.core.security.ColumnVisibility.quote;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.util.ByteArraySet;
import org.junit.jupiter.api.Test;
/**
 * Tests {@link VisibilityEvaluator} against various authorization sets: boolean expression
 * evaluation, quoted terms, escape-sequence handling, and non-ASCII authorizations.
 */
public class VisibilityEvaluatorTest {

  @Test
  public void testVisibilityEvaluator() throws VisibilityParseException {
    // evaluator holds authorizations one, two, three, four
    VisibilityEvaluator ct = new VisibilityEvaluator(
        new Authorizations(ByteArraySet.fromStrings("one", "two", "three", "four")));
    // test for empty vis
    assertTrue(ct.evaluate(new ColumnVisibility(new byte[0])));
    // test for and
    assertTrue(ct.evaluate(new ColumnVisibility("one&two")), "'and' test");
    // test for or ("foor" is not a held authorization; the expression passes via "four")
    assertTrue(ct.evaluate(new ColumnVisibility("foor|four")), "'or' test");
    // test for and and or
    assertTrue(ct.evaluate(new ColumnVisibility("(one&two)|(foo&bar)")), "'and' and 'or' test");
    // test for false negatives: all of these should evaluate to visible
    for (String marking : new String[] {"one", "one|five", "five|one", "(one)",
        "(one&two)|(foo&bar)", "(one|foo)&three", "one|foo|bar", "(one|foo)|bar",
        "((one|foo)|bar)&two"}) {
      assertTrue(ct.evaluate(new ColumnVisibility(marking)), marking);
    }
    // test for false positives: all of these should evaluate to not visible
    for (String marking : new String[] {"five", "one&five", "five&one", "((one|foo)|bar)&goober"}) {
      assertFalse(ct.evaluate(new ColumnVisibility(marking)), marking);
    }
  }

  @Test
  public void testQuotedExpressions() throws VisibilityParseException {
    // authorizations containing characters that require quoting in expressions
    Authorizations auths = new Authorizations("A#C", "A\"C", "A\\C", "AC");
    VisibilityEvaluator ct = new VisibilityEvaluator(auths);
    runQuoteTest(ct);
    // construct VisibilityEvaluator using another constructor and run test again
    ct = new VisibilityEvaluator((AuthorizationContainer) auths);
    runQuoteTest(ct);
  }

  // Shared assertions for quoted-term evaluation, run against both evaluator constructors;
  // each expression is also re-parsed from its flattened form to verify equivalence.
  private void runQuoteTest(VisibilityEvaluator ct) throws VisibilityParseException {
    assertTrue(ct.evaluate(new ColumnVisibility(quote("A#C") + "|" + quote("A?C"))));
    assertTrue(ct.evaluate(
        new ColumnVisibility(new ColumnVisibility(quote("A#C") + "|" + quote("A?C")).flatten())));
    assertTrue(ct.evaluate(new ColumnVisibility(quote("A\"C") + "&" + quote("A\\C"))));
    assertTrue(ct.evaluate(
        new ColumnVisibility(new ColumnVisibility(quote("A\"C") + "&" + quote("A\\C")).flatten())));
    assertTrue(
        ct.evaluate(new ColumnVisibility("(" + quote("A\"C") + "|B)&(" + quote("A#C") + "|D)")));
    assertFalse(ct.evaluate(new ColumnVisibility(quote("A#C") + "&B")));
    assertTrue(ct.evaluate(new ColumnVisibility(quote("A#C"))));
    assertTrue(ct.evaluate(new ColumnVisibility("(" + quote("A#C") + ")")));
  }

  @Test
  public void testQuote() {
    // terms with special characters get wrapped in quotes, with " and \ escaped
    assertEquals("\"A#C\"", quote("A#C"));
    assertEquals("\"A\\\"C\"", quote("A\"C"));
    assertEquals("\"A\\\"\\\\C\"", quote("A\"\\C"));
    // plain alphanumeric terms are returned unquoted
    assertEquals("ACS", quote("ACS"));
    // multi-byte (non-ASCII) terms are quoted
    assertEquals("\"九\"", quote("九"));
    assertEquals("\"五十\"", quote("五十"));
  }

  @Test
  public void testUnescape() {
    // \" and \\ unescape to " and \ respectively
    assertEquals("a\"b", VisibilityEvaluator.unescape(new ArrayByteSequence("a\\\"b")).toString());
    assertEquals("a\\b", VisibilityEvaluator.unescape(new ArrayByteSequence("a\\\\b")).toString());
    assertEquals("a\\\"b",
        VisibilityEvaluator.unescape(new ArrayByteSequence("a\\\\\\\"b")).toString());
    assertEquals("\\\"",
        VisibilityEvaluator.unescape(new ArrayByteSequence("\\\\\\\"")).toString());
    assertEquals("a\\b\\c\\d",
        VisibilityEvaluator.unescape(new ArrayByteSequence("a\\\\b\\\\c\\\\d")).toString());
    // a backslash followed by anything other than " or \ (or at end of input) is invalid
    final String message = "Expected failure to unescape invalid escape sequence";
    final var invalidEscapeSeqList = List.of(new ArrayByteSequence("a\\b"),
        new ArrayByteSequence("a\\b\\c"), new ArrayByteSequence("a\"b\\"));
    invalidEscapeSeqList.forEach(seq -> assertThrows(IllegalArgumentException.class,
        () -> VisibilityEvaluator.unescape(seq), message));
  }

  @Test
  public void testNonAscii() throws VisibilityParseException {
    // evaluator holds non-ASCII (CJK) authorizations; all terms must be quoted
    VisibilityEvaluator ct = new VisibilityEvaluator(new Authorizations("五", "六", "八", "九", "五十"));
    assertTrue(ct.evaluate(new ColumnVisibility(quote("五") + "|" + quote("四"))));
    assertFalse(ct.evaluate(new ColumnVisibility(quote("五") + "&" + quote("四"))));
    assertTrue(
        ct.evaluate(new ColumnVisibility(quote("五") + "&(" + quote("四") + "|" + quote("九") + ")")));
    assertTrue(ct.evaluate(new ColumnVisibility("\"五\"&(\"四\"|\"五十\")")));
    assertFalse(
        ct.evaluate(new ColumnVisibility(quote("五") + "&(" + quote("四") + "|" + quote("三") + ")")));
    assertFalse(ct.evaluate(new ColumnVisibility("\"五\"&(\"四\"|\"三\")")));
  }
}
| 9,352 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/security/AuthorizationsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.security;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.nio.ByteBuffer;
import org.apache.accumulo.core.util.ByteArraySet;
import org.junit.jupiter.api.Test;
public class AuthorizationsTest {

  @Test
  public void testSetOfByteArrays() {
    // membership must be decided by array contents, not array identity
    assertTrue(ByteArraySet.fromStrings("a", "b", "c").contains("a".getBytes()));
  }

  /** Asserts that the given auths survive a round trip through the serialized array form. */
  private static void assertRoundTrips(Authorizations auths) {
    byte[] encoded = auths.getAuthorizationsArray();
    assertEquals(auths, new Authorizations(encoded));
  }

  @Test
  public void testEncodeDecode() {
    // typical auths, including one that is the separator character itself
    assertRoundTrips(new Authorizations("a", "abcdefg", "hijklmno", ","));
    // empty auths
    assertRoundTrips(new Authorizations());
    // multi-byte auths
    assertRoundTrips(new Authorizations("五", "b", "c", "九"));
  }

  @Test
  public void testSerialization() {
    Authorizations first = new Authorizations("a", "b");
    Authorizations second = new Authorizations("b", "a");
    // serialization must be canonical: construction order does not matter
    assertEquals(first, second);
    assertEquals(first.serialize(), second.serialize());
  }

  @Test
  public void testDefensiveAccess() {
    Authorizations expected = new Authorizations("foo", "a");
    Authorizations actual = new Authorizations("foo", "a");
    // mutate the arrays handed out by the iterator (foo -> goo); internal state must not change
    for (byte[] auth : actual) {
      auth[0]++;
    }
    assertArrayEquals(expected.getAuthorizationsArray(), actual.getAuthorizationsArray());
    // mutating the list returned by the getter must likewise leave internal state untouched
    actual.getAuthorizations().get(0)[0]++;
    assertArrayEquals(expected.getAuthorizationsArray(), actual.getAuthorizationsArray());
    assertEquals(expected.serialize(), actual.serialize());
  }

  // This should throw ReadOnlyBufferException, but THRIFT-883 requires that the ByteBuffers
  // themselves not be read-only
  // @Test(expected = ReadOnlyBufferException.class)
  @Test
  public void testReadOnlyByteBuffer() {
    Authorizations expected = new Authorizations("foo");
    Authorizations actual = new Authorizations("foo");
    assertArrayEquals(expected.getAuthorizationsArray(), actual.getAuthorizationsArray());
    actual.getAuthorizationsBB().get(0).array()[0]++;
    assertArrayEquals(expected.getAuthorizationsArray(), actual.getAuthorizationsArray());
  }

  @Test
  public void testUnmodifiableList() {
    Authorizations expected = new Authorizations("foo");
    Authorizations actual = new Authorizations("foo");
    assertArrayEquals(expected.getAuthorizationsArray(), actual.getAuthorizationsArray());
    // the ByteBuffer list view must reject structural modification
    assertThrows(UnsupportedOperationException.class,
        () -> actual.getAuthorizationsBB().add(ByteBuffer.wrap(new byte[] {'a'})));
  }
}
| 9,353 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/lock/ServiceLockDataTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.lock;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Optional;
import java.util.UUID;
import org.apache.accumulo.core.lock.ServiceLockData.ServiceDescriptor;
import org.apache.accumulo.core.lock.ServiceLockData.ServiceDescriptors;
import org.apache.accumulo.core.lock.ServiceLockData.ThriftService;
import org.junit.jupiter.api.Test;
import com.google.common.net.HostAndPort;
public class ServiceLockDataTest {

  private final UUID serverUUID = UUID.randomUUID();

  @Test
  public void testSingleServiceConstructor() throws Exception {
    ServiceLockData ss = new ServiceLockData(serverUUID, "127.0.0.1", ThriftService.TSERV);
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TSERV));
    assertEquals("127.0.0.1", ss.getAddressString(ThriftService.TSERV));
    // the address string has no port, so parsing it as a HostAndPort must fail
    assertThrows(IllegalArgumentException.class, () -> ss.getAddress(ThriftService.TSERV));
    assertEquals(ServiceDescriptor.DEFAULT_GROUP_NAME, ss.getGroup(ThriftService.TSERV));
    // no descriptor was registered for TABLET_SCAN, so every lookup returns null
    assertNull(ss.getServerUUID(ThriftService.TABLET_SCAN));
    assertNull(ss.getAddressString(ThriftService.TABLET_SCAN));
    assertNull(ss.getAddress(ThriftService.TABLET_SCAN));
    assertNull(ss.getGroup(ThriftService.TABLET_SCAN));
  }

  @Test
  public void testMultipleServiceConstructor() throws Exception {
    ServiceDescriptors sds = new ServiceDescriptors();
    sds.addService(new ServiceDescriptor(serverUUID, ThriftService.TSERV, "127.0.0.1:9997"));
    sds.addService(new ServiceDescriptor(serverUUID, ThriftService.TABLET_SCAN, "127.0.0.1:9998"));
    ServiceLockData ss = new ServiceLockData(sds);
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TSERV));
    assertEquals("127.0.0.1:9997", ss.getAddressString(ThriftService.TSERV));
    assertEquals(HostAndPort.fromString("127.0.0.1:9997"), ss.getAddress(ThriftService.TSERV));
    assertEquals(ServiceDescriptor.DEFAULT_GROUP_NAME, ss.getGroup(ThriftService.TSERV));
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TABLET_SCAN));
    assertEquals("127.0.0.1:9998", ss.getAddressString(ThriftService.TABLET_SCAN));
    assertEquals(HostAndPort.fromString("127.0.0.1:9998"),
        ss.getAddress(ThriftService.TABLET_SCAN));
    // fixed: this was a copy-paste duplicate of the TSERV group check above; the intent is to
    // verify the TABLET_SCAN descriptor also defaulted its group
    assertEquals(ServiceDescriptor.DEFAULT_GROUP_NAME, ss.getGroup(ThriftService.TABLET_SCAN));
  }

  @Test
  public void testSingleServiceConstructorWithGroup() throws Exception {
    ServiceLockData ss = new ServiceLockData(serverUUID, "127.0.0.1", ThriftService.TSERV, "meta");
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TSERV));
    assertEquals("127.0.0.1", ss.getAddressString(ThriftService.TSERV));
    assertThrows(IllegalArgumentException.class, () -> ss.getAddress(ThriftService.TSERV));
    // the explicit group name overrides the default
    assertEquals("meta", ss.getGroup(ThriftService.TSERV));
    assertNull(ss.getServerUUID(ThriftService.TABLET_SCAN));
    assertNull(ss.getAddressString(ThriftService.TABLET_SCAN));
    assertNull(ss.getAddress(ThriftService.TABLET_SCAN));
    assertNull(ss.getGroup(ThriftService.TABLET_SCAN));
  }

  @Test
  public void testSingleServiceConstructor2WithGroup() throws Exception {
    ServiceLockData ss = new ServiceLockData(serverUUID, "127.0.0.1", ThriftService.TSERV, "meta");
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TSERV));
    assertEquals("127.0.0.1", ss.getAddressString(ThriftService.TSERV));
    assertThrows(IllegalArgumentException.class, () -> ss.getAddress(ThriftService.TSERV));
    assertEquals("meta", ss.getGroup(ThriftService.TSERV));
    // fixed: this was a duplicate of the TSERV UUID assertion above; following the pattern of the
    // sibling test, assert that the unregistered TABLET_SCAN service has no UUID
    assertNull(ss.getServerUUID(ThriftService.TABLET_SCAN));
    assertNull(ss.getAddressString(ThriftService.TABLET_SCAN));
    assertNull(ss.getAddress(ThriftService.TABLET_SCAN));
    assertNull(ss.getGroup(ThriftService.TABLET_SCAN));
  }

  @Test
  public void testMultipleServiceConstructorWithGroup() throws Exception {
    ServiceDescriptors sds = new ServiceDescriptors();
    sds.addService(
        new ServiceDescriptor(serverUUID, ThriftService.TSERV, "127.0.0.1:9997", "meta"));
    sds.addService(
        new ServiceDescriptor(serverUUID, ThriftService.TABLET_SCAN, "127.0.0.1:9998", "ns1"));
    ServiceLockData ss = new ServiceLockData(sds);
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TSERV));
    assertEquals("127.0.0.1:9997", ss.getAddressString(ThriftService.TSERV));
    assertEquals(HostAndPort.fromString("127.0.0.1:9997"), ss.getAddress(ThriftService.TSERV));
    assertEquals("meta", ss.getGroup(ThriftService.TSERV));
    assertEquals(serverUUID, ss.getServerUUID(ThriftService.TABLET_SCAN));
    assertEquals("127.0.0.1:9998", ss.getAddressString(ThriftService.TABLET_SCAN));
    assertEquals(HostAndPort.fromString("127.0.0.1:9998"),
        ss.getAddress(ThriftService.TABLET_SCAN));
    assertEquals("ns1", ss.getGroup(ThriftService.TABLET_SCAN));
    // nothing was registered for COMPACTOR
    assertNull(ss.getAddressString(ThriftService.COMPACTOR));
    assertNull(ss.getAddress(ThriftService.COMPACTOR));
    assertNull(ss.getGroup(ThriftService.COMPACTOR));
  }

  @Test
  public void testParseEmpty() {
    // an empty payload parses to an empty Optional rather than throwing
    Optional<ServiceLockData> sld = ServiceLockData.parse(new byte[0]);
    assertTrue(sld.isEmpty());
    assertFalse(sld.isPresent());
  }
}
| 9,354 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/lock/ServiceLockTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.lock;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.ArrayList;
import java.util.List;
import org.junit.jupiter.api.Test;
public class ServiceLockTest {

  @Test
  public void testSortAndFindLowestPrevPrefix() throws Exception {
    // deliberately unsorted input, including two malformed names that must be filtered out
    List<String> children = new ArrayList<>(List.of(
        "zlock#00000000-0000-0000-0000-ffffffffffff#0000000007",
        "zlock#00000000-0000-0000-0000-eeeeeeeeeeee#0000000010",
        "zlock#00000000-0000-0000-0000-bbbbbbbbbbbb#0000000006",
        "zlock#00000000-0000-0000-0000-dddddddddddd#0000000008",
        "zlock#00000000-0000-0000-0000-bbbbbbbbbbbb#0000000004",
        "zlock-123456789",
        "zlock#00000000-0000-0000-0000-cccccccccccc#0000000003",
        "zlock#00000000-0000-0000-0000-aaaaaaaaaaaa#0000000002",
        "zlock#987654321",
        "zlock#00000000-0000-0000-0000-aaaaaaaaaaaa#0000000001"));
    final List<String> validChildren = ServiceLock.validateAndSort(ServiceLock.path(""), children);
    // the two malformed names are dropped and the remainder is ordered by sequence number
    String[] expectedOrder = {"zlock#00000000-0000-0000-0000-aaaaaaaaaaaa#0000000001",
        "zlock#00000000-0000-0000-0000-aaaaaaaaaaaa#0000000002",
        "zlock#00000000-0000-0000-0000-cccccccccccc#0000000003",
        "zlock#00000000-0000-0000-0000-bbbbbbbbbbbb#0000000004",
        "zlock#00000000-0000-0000-0000-bbbbbbbbbbbb#0000000006",
        "zlock#00000000-0000-0000-0000-ffffffffffff#0000000007",
        "zlock#00000000-0000-0000-0000-dddddddddddd#0000000008",
        "zlock#00000000-0000-0000-0000-eeeeeeeeeeee#0000000010"};
    assertEquals(expectedOrder.length, validChildren.size());
    for (int i = 0; i < expectedOrder.length; i++) {
      assertEquals(expectedOrder[i], validChildren.get(i));
    }
    // the "previous" entry is the nearest lower sequence number with a different prefix
    assertEquals("zlock#00000000-0000-0000-0000-bbbbbbbbbbbb#0000000004",
        ServiceLock.findLowestPrevPrefix(validChildren,
            "zlock#00000000-0000-0000-0000-ffffffffffff#0000000007"));
    assertEquals("zlock#00000000-0000-0000-0000-aaaaaaaaaaaa#0000000001",
        ServiceLock.findLowestPrevPrefix(validChildren,
            "zlock#00000000-0000-0000-0000-cccccccccccc#0000000003"));
    assertEquals("zlock#00000000-0000-0000-0000-dddddddddddd#0000000008",
        ServiceLock.findLowestPrevPrefix(validChildren,
            "zlock#00000000-0000-0000-0000-eeeeeeeeeeee#0000000010"));
    // the lowest entry has no predecessor
    assertThrows(IndexOutOfBoundsException.class, () -> {
      ServiceLock.findLowestPrevPrefix(validChildren,
          "zlock#00000000-0000-0000-0000-aaaaaaaaaaaa#0000000001");
    });
    // an entry not present in the list also has no predecessor
    assertThrows(IndexOutOfBoundsException.class, () -> {
      ServiceLock.findLowestPrevPrefix(validChildren,
          "zlock#00000000-0000-0000-0000-XXXXXXXXXXXX#0000000099");
    });
  }
}
| 9,355 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/compaction/ShellCompactCommandConfigurerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import static org.apache.accumulo.core.conf.ConfigurationTypeHelper.getFixedMemoryAsBytes;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.client.PluginEnvironment;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer;
import org.apache.accumulo.core.client.admin.compaction.CompactionConfigurer.Overrides;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.junit.jupiter.api.Test;
public class ShellCompactCommandConfigurerTest {
  @Test
  public void testOutputOptions() throws URISyntaxException {
    // one arbitrary input file; the option-to-property mapping under test does not depend on it
    Collection<CompactableFile> files = Set.of(CompactableFile
        .create(new URI("hdfs://nn1/accumulo/tables/1/t-009/F00001.rf"), 50000, 400));
    // test setting no output options
    ShellCompactCommandConfigurer ccs = new ShellCompactCommandConfigurer();
    // shared options map: left empty for the first init(), then populated and re-read below
    Map<String,String> opts = new HashMap<>();
    var initParams = new CompactionConfigurer.InitParameters() {
      @Override
      public TableId getTableId() {
        return TableId.of("1");
      }
      @Override
      public Map<String,String> getOptions() {
        // returns the live map above, so later mutations are visible on the second init() call
        return opts;
      }
      @Override
      public PluginEnvironment getEnvironment() {
        // not consulted in this test
        return null;
      }
    };
    ccs.init(initParams);
    var inputParams = new CompactionConfigurer.InputParameters() {
      @Override
      public TableId getTableId() {
        return null;
      }
      @Override
      public TabletId getTabletId() {
        return null;
      }
      @Override
      public Collection<CompactableFile> getInputFiles() {
        return files;
      }
      @Override
      public PluginEnvironment getEnvironment() {
        return null;
      }
    };
    Overrides plan = ccs.override(inputParams);
    // with no options set, no table properties should be overridden
    assertTrue(plan.getOverrides().isEmpty());
    // test setting all output options
    ccs = new ShellCompactCommandConfigurer();
    CompactionSettings.OUTPUT_BLOCK_SIZE_OPT.put(null, opts, "64K");
    CompactionSettings.OUTPUT_COMPRESSION_OPT.put(null, opts, "snappy");
    CompactionSettings.OUTPUT_HDFS_BLOCK_SIZE_OPT.put(null, opts, "256M");
    CompactionSettings.OUTPUT_INDEX_BLOCK_SIZE_OPT.put(null, opts, "32K");
    CompactionSettings.OUTPUT_REPLICATION_OPT.put(null, opts, "5");
    ccs.init(initParams);
    plan = ccs.override(inputParams);
    // each shell option maps to its table property, with memory sizes expanded to byte counts
    Map<String,
        String> expected = Map.of(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "snappy",
        Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), getFixedMemoryAsBytes("64K") + "",
        Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(),
        getFixedMemoryAsBytes("32K") + "", Property.TABLE_FILE_BLOCK_SIZE.getKey(),
        getFixedMemoryAsBytes("256M") + "", Property.TABLE_FILE_REPLICATION.getKey(), "5");
    assertEquals(expected, plan.getOverrides());
  }
}
| 9,356 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/compaction/ShellCompactCommandSelectorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.compaction;
import java.net.URISyntaxException;
import org.junit.jupiter.api.Test;
public class ShellCompactCommandSelectorTest {
@Test
public void testSelection() throws URISyntaxException {
// file selection options are adequately tested by ShellServerIT, so this is just a placeholder
}
}
| 9,357 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Properties;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.ClientProperty;
import org.junit.jupiter.api.Test;
public class TestClientOpts {

  @Test
  public void testBasic() {
    // an explicit -o auth.principal override takes precedence over the -u user
    ClientOpts opts = new ClientOpts();
    String[] args = new String[] {"-u", "userabc", "-o", "instance.name=myinst", "-o",
        "instance.zookeepers=zoo1,zoo2", "-o", "auth.type=password", "-o", "auth.principal=user123",
        "-o", "auth.token=mypass"};
    opts.parseArgs("test", args);
    Properties props = opts.getClientProps();
    assertEquals("user123", ClientProperty.AUTH_PRINCIPAL.getValue(props));
    assertTrue(opts.getToken() instanceof PasswordToken);
    assertEquals("myinst", props.getProperty("instance.name"));
    // without an auth.principal override, the -u user becomes the principal
    opts = new ClientOpts();
    args = new String[] {"-o", "instance.name=myinst", "-o", "instance.zookeepers=zoo1,zoo2", "-o",
        "auth.type=password", "-o", "auth.token=mypass", "-u", "userabc"};
    opts.parseArgs("test", args);
    props = opts.getClientProps();
    assertEquals("userabc", ClientProperty.AUTH_PRINCIPAL.getValue(props));
    assertTrue(opts.getToken() instanceof PasswordToken);
    assertEquals("myinst", props.getProperty("instance.name"));
  }

  @Test
  public void testPassword() {
    ClientOpts opts = new ClientOpts();
    String[] args =
        new String[] {"--password", "mypass", "-u", "userabc", "-o", "instance.name=myinst", "-o",
            "instance.zookeepers=zoo1,zoo2", "-o", "auth.principal=user123"};
    opts.parseArgs("test", args);
    Properties props = opts.getClientProps();
    assertEquals("user123", ClientProperty.AUTH_PRINCIPAL.getValue(props));
    assertTrue(opts.getToken() instanceof PasswordToken);
    // use assertEquals (was assertTrue(a.equals(b))) so a failure reports both values
    assertEquals(new PasswordToken("mypass"), opts.getToken());
    assertEquals("myinst", props.getProperty("instance.name"));
  }
}
| 9,358 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/cli/ConfigOptsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ConfigOptsTest {

  private ConfigOpts opts;

  @BeforeEach
  public void setUp() {
    opts = new ConfigOpts();
  }

  /** Parses the given command-line args and returns the resulting value of {@code property}. */
  private String parseAndGet(Property property, String... args) {
    opts.parseArgs(ConfigOptsTest.class.getName(), args);
    return opts.getSiteConfiguration().get(property);
  }

  @Test
  public void testGetAddress() {
    String address = parseAndGet(Property.GENERAL_PROCESS_BIND_ADDRESS, "-o",
        Property.GENERAL_PROCESS_BIND_ADDRESS.getKey() + "=1.2.3.4");
    assertEquals("1.2.3.4", address);
  }

  @Test
  public void testGetAddress_NOne() {
    // with no override, the bind address falls back to the wildcard default
    assertEquals("0.0.0.0", parseAndGet(Property.GENERAL_PROCESS_BIND_ADDRESS));
  }

  @Test
  public void testOverrideConfig() {
    AccumuloConfiguration defaults = DefaultConfiguration.getInstance();
    assertEquals("localhost:2181", defaults.get(Property.INSTANCE_ZK_HOST));
    // a -o override replaces the default in the site configuration
    assertEquals("test:123",
        parseAndGet(Property.INSTANCE_ZK_HOST, "-o", "instance.zookeeper.host=test:123"));
  }
}
| 9,359 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/cli/PasswordConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.Scanner;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
public class PasswordConverterTest {

  /**
   * JCommander argument holder whose {@code --password} value is processed by
   * {@link ClientOpts.PasswordConverter}. Static nested (was a non-static inner class) so
   * instances do not hold a hidden reference to the enclosing test.
   */
  private static class Password {
    @Parameter(names = "--password", converter = ClientOpts.PasswordConverter.class)
    String password;
  }

  private String[] argv;
  private Password password;
  private static InputStream realIn;

  @BeforeAll
  public static void saveIn() {
    realIn = System.in;
  }

  @BeforeEach
  public void setup() throws IOException {
    argv = new String[] {"--password", ""};
    password = new Password();
    // replace stdin with a pipe pre-loaded with "secret"; restored in teardown()
    PipedInputStream in = new PipedInputStream();
    PipedOutputStream out = new PipedOutputStream(in);
    // pass the charset explicitly; the single-arg constructor would use the platform default
    OutputStreamWriter osw = new OutputStreamWriter(out, UTF_8);
    osw.write("secret");
    osw.close();
    System.setIn(in);
  }

  @AfterEach
  public void teardown() {
    System.setIn(realIn);
  }

  @Test
  public void testPass() {
    // "pass:<value>" resolves to the literal value
    String expected = String.valueOf(RANDOM.get().nextDouble());
    argv[1] = "pass:" + expected;
    new JCommander(password).parse(argv);
    assertEquals(expected, password.password);
  }

  @Test
  public void testEnv() {
    // "env:<name>" resolves to the named environment variable's value
    String name = System.getenv().keySet().iterator().next();
    argv[1] = "env:" + name;
    new JCommander(password).parse(argv);
    assertEquals(System.getenv(name), password.password);
  }

  @Test
  public void testFile() throws IOException {
    // "file:<path>" resolves to the first line of the file
    argv[1] = "file:pom.xml";
    Scanner scan = new Scanner(new File("pom.xml"), UTF_8);
    String expected = scan.nextLine();
    scan.close();
    new JCommander(password).parse(argv);
    assertEquals(expected, password.password);
  }

  @Test
  public void testNoFile() {
    // a missing file is surfaced as a parameter error
    argv[1] = "file:doesnotexist";
    assertThrows(ParameterException.class, () -> new JCommander(password).parse(argv));
  }

  @Test
  public void testStdin() {
    // the parsed value stays the literal "stdin" marker rather than the piped stdin contents
    argv[1] = "stdin";
    new JCommander(password).parse(argv);
    assertEquals("stdin", password.password);
  }

  @Test
  public void testPlainText() {
    // a value with no recognized prefix is treated as the password itself
    argv[1] = "passwordString";
    new JCommander(password).parse(argv);
    assertEquals("passwordString", password.password);
  }
}
| 9,360 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/cli/TestHelp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.cli;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;
public class TestHelp {
  /** Help subclass that surfaces the would-be exit status as an exception instead of exiting. */
  protected class HelpStub extends Help {
    @Override
    public void parseArgs(String programName, String[] args, Object... others) {
      super.parseArgs(programName, args, others);
    }

    @Override
    public void exit(int status) {
      // record the exit code for the test rather than terminating the JVM
      throw new IllegalStateException(Integer.toString(status));
    }
  }

  @Test
  public void testInvalidArgs() {
    String[] args = {"foo"};
    HelpStub help = new HelpStub();
    // unrecognized args must exit with status 1; the previous catch-and-assert form passed
    // silently when no exception was thrown at all
    var e = assertThrows(IllegalStateException.class, () -> help.parseArgs("program", args));
    assertEquals("1", e.getMessage());
  }
}
| 9,361 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/classloader/ContextClassLoaderFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.classloader;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.net.URLClassLoader;
import java.util.Objects;
import org.apache.accumulo.core.WithTestNames;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.commons.io.FileUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
public class ContextClassLoaderFactoryTest extends WithTestNames {

  @TempDir
  private static File tempFolder;

  // context strings handed to the factory: a properties-file URI and a directory wildcard
  private String uri1;
  private String uri2;

  /**
   * Copies the named classpath resource into a fresh per-test sub-directory and returns the
   * copied file.
   */
  private File stageResource(String dirSuffix, String resourceName) throws Exception {
    File dir = new File(tempFolder, testName() + dirSuffix);
    assertTrue(dir.isDirectory() || dir.mkdir(), "Failed to make a new sub-directory");
    File copy = new File(dir, resourceName.substring(1));
    FileUtils.copyURLToFile(Objects.requireNonNull(this.getClass().getResource(resourceName)),
        copy);
    return copy;
  }

  @BeforeEach
  public void setup() throws Exception {
    uri1 = stageResource("_1", "/accumulo.properties").toURI().toString();
    // the second context is expressed as the containing directory's URI plus a wildcard
    uri2 = stageResource("_2", "/accumulo2.properties").getParentFile().toURI() + ".*";
  }

  @Test
  public void differentContexts() {
    ConfigurationCopy conf = new ConfigurationCopy();
    conf.set(Property.GENERAL_CONTEXT_CLASSLOADER_FACTORY.getKey(),
        URLContextClassLoaderFactory.class.getName());
    ClassLoaderUtil.resetContextFactoryForTests();
    ClassLoaderUtil.initContextFactory(conf);
    // each distinct context string must produce a classloader holding exactly that URL
    for (String contextUri : new String[] {uri1, uri2}) {
      URLClassLoader loader =
          (URLClassLoader) ClassLoaderUtil.getContextFactory().getClassLoader(contextUri);
      var urls = loader.getURLs();
      assertEquals(1, urls.length);
      assertEquals(contextUri, urls[0].toString());
    }
  }
}
| 9,362 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/AgeOffStoreTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.HashSet;
import java.util.Set;
import org.apache.accumulo.core.fate.AgeOffStore.TimeSource;
import org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus;
import org.apache.zookeeper.KeeperException;
import org.junit.jupiter.api.Test;
public class AgeOffStoreTest {

  /**
   * Deterministic, manually advanced clock so the tests control exactly when transactions
   * become eligible for age-off.
   */
  private static class TestTimeSource implements TimeSource {
    long time = 0;

    @Override
    public long currentTimeMillis() {
      return time;
    }
  }

  @Test
  public void testBasic() throws InterruptedException, KeeperException {
    TestTimeSource tts = new TestTimeSource();
    TestStore testStore = new TestStore();
    // Age off transactions that have been idle in a terminal/NEW state for >= 10 time units.
    AgeOffStore<String> aoStore = new AgeOffStore<>(testStore, 10, tts);

    aoStore.ageOff();

    // txid1 stays IN_PROGRESS, so it must never be aged off.
    long txid1 = aoStore.create();
    aoStore.reserve(txid1);
    aoStore.setStatus(txid1, TStatus.IN_PROGRESS);
    aoStore.unreserve(txid1, 0);

    aoStore.ageOff();

    // txid2 becomes FAILED at time 0.
    long txid2 = aoStore.create();
    aoStore.reserve(txid2);
    aoStore.setStatus(txid2, TStatus.IN_PROGRESS);
    aoStore.setStatus(txid2, TStatus.FAILED);
    aoStore.unreserve(txid2, 0);

    tts.time = 6;

    // txid3 becomes SUCCESSFUL at time 6.
    long txid3 = aoStore.create();
    aoStore.reserve(txid3);
    aoStore.setStatus(txid3, TStatus.IN_PROGRESS);
    aoStore.setStatus(txid3, TStatus.SUCCESSFUL);
    aoStore.unreserve(txid3, 0);

    // txid4 remains NEW, created at time 6.
    Long txid4 = aoStore.create();

    aoStore.ageOff();
    assertEquals(Set.of(txid1, txid2, txid3, txid4), new HashSet<>(aoStore.list()));
    assertEquals(4, new HashSet<>(aoStore.list()).size());

    tts.time = 15;
    aoStore.ageOff();
    // Only txid2 (FAILED since time 0) has passed the 10-unit threshold.
    assertEquals(Set.of(txid1, txid3, txid4), new HashSet<>(aoStore.list()));
    assertEquals(3, new HashSet<>(aoStore.list()).size());

    tts.time = 30;
    aoStore.ageOff();
    // txid3 and txid4 are now old enough; only the IN_PROGRESS txid1 survives.
    assertEquals(Set.of(txid1), new HashSet<>(aoStore.list()));
    // Fixed: was Set.of(aoStore.list()).size(), which wraps the entire list as a single set
    // element and therefore always equals 1, making the assertion vacuous. Use the same
    // HashSet-of-elements pattern as every other size check in this class.
    assertEquals(1, new HashSet<>(aoStore.list()).size());
  }

  @Test
  public void testNonEmpty() throws InterruptedException, KeeperException {
    // test age off when source store starts off non empty
    TestTimeSource tts = new TestTimeSource();
    TestStore testStore = new TestStore();

    // txid1: IN_PROGRESS, never eligible for age-off.
    long txid1 = testStore.create();
    testStore.reserve(txid1);
    testStore.setStatus(txid1, TStatus.IN_PROGRESS);
    testStore.unreserve(txid1, 0);

    // txid2: FAILED; txid3: SUCCESSFUL; txid4: NEW — all eligible once aged.
    long txid2 = testStore.create();
    testStore.reserve(txid2);
    testStore.setStatus(txid2, TStatus.IN_PROGRESS);
    testStore.setStatus(txid2, TStatus.FAILED);
    testStore.unreserve(txid2, 0);

    long txid3 = testStore.create();
    testStore.reserve(txid3);
    testStore.setStatus(txid3, TStatus.IN_PROGRESS);
    testStore.setStatus(txid3, TStatus.SUCCESSFUL);
    testStore.unreserve(txid3, 0);

    Long txid4 = testStore.create();

    // Wrap the already-populated store; the existing transactions must be picked up.
    AgeOffStore<String> aoStore = new AgeOffStore<>(testStore, 10, tts);
    assertEquals(Set.of(txid1, txid2, txid3, txid4), new HashSet<>(aoStore.list()));
    assertEquals(4, new HashSet<>(aoStore.list()).size());

    aoStore.ageOff();
    // Nothing is old enough yet at time 0.
    assertEquals(Set.of(txid1, txid2, txid3, txid4), new HashSet<>(aoStore.list()));
    assertEquals(4, new HashSet<>(aoStore.list()).size());

    tts.time = 15;
    aoStore.ageOff();
    assertEquals(Set.of(txid1), new HashSet<>(aoStore.list()));
    assertEquals(1, new HashSet<>(aoStore.list()).size());

    // Transitioning txid1 to FAILED_IN_PROGRESS resets its age; it must survive at time 30.
    aoStore.reserve(txid1);
    aoStore.setStatus(txid1, TStatus.FAILED_IN_PROGRESS);
    aoStore.unreserve(txid1, 0);

    tts.time = 30;
    aoStore.ageOff();
    assertEquals(Set.of(txid1), new HashSet<>(aoStore.list()));
    assertEquals(1, new HashSet<>(aoStore.list()).size());

    // FAILED at time 30; aged off once the clock passes 40.
    aoStore.reserve(txid1);
    aoStore.setStatus(txid1, TStatus.FAILED);
    aoStore.unreserve(txid1, 0);

    aoStore.ageOff();
    assertEquals(Set.of(txid1), new HashSet<>(aoStore.list()));
    assertEquals(1, new HashSet<>(aoStore.list()).size());

    tts.time = 42;
    aoStore.ageOff();
    assertEquals(0, new HashSet<>(aoStore.list()).size());
  }
}
| 9,363 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Transient in memory store for transactions.
*/
/**
 * Transient in memory store for transactions.
 */
public class TestStore extends ZooStore<String> {

  private long nextId = 1;
  private final Map<Long,TStatus> statuses = new HashMap<>();
  private final Set<Long> reserved = new HashSet<>();

  @Override
  public long create() {
    long tid = nextId++;
    statuses.put(tid, TStatus.NEW);
    return tid;
  }

  @Override
  public void reserve(long tid) {
    // The real zoo store would block on a double reservation; the tests are not expected to
    // reserve the same id twice, so treat it as a programming error. If a test changes to do
    // so, change this as well.
    if (!reserved.add(tid)) {
      throw new IllegalStateException();
    }
  }

  @Override
  public synchronized boolean tryReserve(long tid) {
    if (reserved.contains(tid)) {
      return false;
    }
    reserve(tid);
    return true;
  }

  @Override
  public void unreserve(long tid, long deferTime) {
    if (!reserved.remove(tid)) {
      throw new IllegalStateException();
    }
  }

  @Override
  public org.apache.accumulo.core.fate.TStore.TStatus getStatus(long tid) {
    requireReserved(tid);
    // Unknown ids report UNKNOWN rather than null.
    return statuses.getOrDefault(tid, TStatus.UNKNOWN);
  }

  @Override
  public void setStatus(long tid, org.apache.accumulo.core.fate.TStore.TStatus status) {
    requireReserved(tid);
    // Only existing transactions may have their status updated.
    if (statuses.replace(tid, status) == null) {
      throw new IllegalStateException();
    }
  }

  @Override
  public void delete(long tid) {
    requireReserved(tid);
    statuses.remove(tid);
  }

  @Override
  public List<Long> list() {
    return new ArrayList<>(statuses.keySet());
  }

  // Guard: mutating/reading a transaction requires holding its reservation.
  private void requireReserved(long tid) {
    if (!reserved.contains(tid)) {
      throw new IllegalStateException();
    }
  }
}
| 9,364 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/zookeeper/DistributedReadWriteLockTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.nio.charset.StandardCharsets;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import org.apache.accumulo.core.fate.zookeeper.DistributedReadWriteLock.QueueLock;
import org.junit.jupiter.api.Test;
public class DistributedReadWriteLockTest {

  /** Non-zookeeper, in-memory version of QueueLock for exercising the lock logic locally. */
  public static class MockQueueLock implements QueueLock {

    long next = 0L;
    final SortedMap<Long,byte[]> locks = new TreeMap<>();

    @Override
    public synchronized SortedMap<Long,byte[]> getEarlierEntries(long entry) {
      // Snapshot of all entries at or before the given one (headMap is exclusive, hence +1).
      SortedMap<Long,byte[]> result = new TreeMap<>();
      result.putAll(locks.headMap(entry + 1));
      return result;
    }

    @Override
    public synchronized void removeEntry(long entry) {
      synchronized (locks) {
        locks.remove(entry);
        locks.notifyAll();
      }
    }

    @Override
    public synchronized long addEntry(byte[] data) {
      long result;
      synchronized (locks) {
        locks.put(result = next++, data);
        locks.notifyAll();
      }
      return result;
    }
  }

  // some data that is probably not going to update atomically
  static class SomeData {

    private final AtomicIntegerArray data = new AtomicIntegerArray(100);
    private final AtomicInteger counter = new AtomicInteger();

    // Fails if a write is observed part way through: every slot must match the counter.
    void read() {
      for (int i = 0; i < data.length(); i++) {
        assertEquals(counter.get(), data.get(i));
      }
    }

    // Bump the counter, then fill the slots in reverse order so an unsynchronized
    // concurrent reader is likely to see a torn mix of old and new values.
    void write() {
      int nextCount = counter.incrementAndGet();
      for (int i = data.length() - 1; i >= 0; i--) {
        data.set(i, nextCount);
      }
    }
  }

  @Test
  public void testLock() throws Exception {
    final SomeData data = new SomeData();
    data.write();
    data.read();

    QueueLock qlock = new MockQueueLock();
    // Use an explicit charset rather than the platform default (data is ASCII either way).
    final ReadWriteLock locker =
        new DistributedReadWriteLock(qlock, "locker1".getBytes(StandardCharsets.UTF_8));
    final Lock readLock = locker.readLock();
    final Lock writeLock = locker.writeLock();

    // Single-threaded sanity check of acquire/release in both modes.
    readLock.lock();
    readLock.unlock();
    writeLock.lock();
    writeLock.unlock();
    readLock.lock();
    readLock.unlock();

    // do a bunch of reads/writes in separate threads, look for inconsistent updates
    Thread[] threads = new Thread[2];
    for (int i = 0; i < threads.length; i++) {
      final int which = i;
      threads[i] = new Thread(() -> {
        if (which % 2 == 0) {
          final Lock wl = locker.writeLock();
          wl.lock();
          try {
            data.write();
          } finally {
            wl.unlock();
          }
        } else {
          final Lock rl = locker.readLock();
          rl.lock();
          // Fixed: a second data.read() used to run before this try block; if its assertion
          // failed, the read lock was never released and the writer thread would hang on
          // join() instead of the test failing cleanly.
          try {
            data.read();
          } finally {
            rl.unlock();
          }
        }
      });
    }
    for (Thread t : threads) {
      t.start();
    }
    for (Thread t : threads) {
      t.join();
    }
  }
}
| 9,365 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/zookeeper/ZooCacheFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ZooCacheFactoryTest {

  /** Factory under test; built fresh for each test and reset afterwards. */
  private ZooCacheFactory zcf;

  @BeforeEach
  public void setUp() {
    zcf = new ZooCacheFactory();
  }

  @AfterEach
  public void tearDown() {
    zcf.reset();
  }

  /** The same (quorum, timeout) pair returns the cached instance; any difference yields a new one. */
  @Test
  public void testGetZooCache() {
    ZooCache cacheA = zcf.getZooCache("zk1", 1000);
    ZooCache cacheAagain = zcf.getZooCache("zk1", 1000);
    assertSame(cacheA, cacheAagain);
    // Different quorum string.
    ZooCache cacheB = zcf.getZooCache("zk2", 1000);
    assertNotSame(cacheA, cacheB);
    // Same quorum, different timeout.
    ZooCache cacheC = zcf.getZooCache("zk1", 2000);
    assertNotSame(cacheA, cacheC);
  }

  /** getNewZooCache always returns a fresh instance and never populates the shared cache. */
  @Test
  public void testGetNewZooCache() {
    ZooCache fresh = zcf.getNewZooCache("zk1", 1000);
    assertNotNull(fresh);
    ZooCache cached = zcf.getZooCache("zk1", 1000);
    assertNotSame(fresh, cached);
    ZooCache fresh2 = zcf.getNewZooCache("zk1", 1000);
    assertNotSame(fresh, fresh2);
    assertNotSame(cached, fresh2);
  }

  /** After reset(), a previously cached key produces a brand new instance. */
  @Test
  public void testReset() {
    ZooCache before = zcf.getZooCache("zk1", 1000);
    zcf.reset();
    ZooCache after = zcf.getZooCache("zk1", 1000);
    assertNotSame(before, after);
  }
}
| 9,366 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/zookeeper/ZooSessionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.zookeeper.ZooKeeper;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
public class ZooSessionTest {

  private static final int TIMEOUT_SECONDS = 10;
  private static final String UNKNOWN_HOST = "hostname.that.should.not.exist.example.com:2181";

  /**
   * Connecting to an unresolvable host must fail with a RuntimeException rather than hang;
   * the generous Timeout guards against a hang being reported as a silent pass.
   */
  @Test
  @Timeout(TIMEOUT_SECONDS * 4)
  public void testUnknownHost() {
    assertThrows(RuntimeException.class, () -> {
      try (ZooKeeper session = ZooSession.connect(UNKNOWN_HOST,
          (int) SECONDS.toMillis(TIMEOUT_SECONDS), null, null, null)) {
        // connect() is expected to throw before the session is ever usable
      }
    });
  }
}
| 9,367 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/zookeeper/ZooReaderWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.createMockBuilder;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter.Mutator;
import org.apache.accumulo.core.util.Retry;
import org.apache.accumulo.core.util.Retry.RetryFactory;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.BadVersionException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.KeeperException.ConnectionLossException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.KeeperException.SessionExpiredException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ZooReaderWriterTest {

  // Partial mock of the class under test plus mocks for its collaborators; the real
  // retry/mutate logic in ZooReaderWriter runs against the mocked ZooKeeper.
  private ZooReaderWriter zrw;
  private ZooKeeper zk;
  private RetryFactory retryFactory;
  private Retry retry;

  @BeforeEach
  public void setup() {
    zk = createMock(ZooKeeper.class);
    // Only the accessors are mocked so every other ZooReaderWriter method executes for real.
    zrw = createMockBuilder(ZooReaderWriter.class)
        .addMockedMethods("getRetryFactory", "getZooKeeper").createMock();
    retryFactory = createMock(RetryFactory.class);
    retry = createMock(Retry.class);
    expect(zrw.getZooKeeper()).andReturn(zk).anyTimes();
    expect(zrw.getRetryFactory()).andReturn(retryFactory).anyTimes();
    expect(retryFactory.createRetry()).andReturn(retry).anyTimes();
  }

  /** Deleting a node that does not exist is treated as success, not an error. */
  @Test
  public void testDeleteSucceedOnInitialNoNode() throws Exception {
    final String path = "/foo";
    zk.delete(path, -1);
    expectLastCall().andThrow(KeeperException.create(Code.NONODE));
    replay(zk, zrw, retryFactory, retry);
    zrw.delete(path);
    verify(zk, zrw, retryFactory, retry);
  }

  /**
   * A transient CONNECTIONLOSS triggers one retry; the retried delete then sees NONODE
   * (someone else deleted it), which still counts as success.
   */
  @Test
  public void testDeleteSucceedOnRetry() throws Exception {
    final String path = "/foo";
    zk.delete(path, -1);
    expectLastCall().andThrow(KeeperException.create(Code.CONNECTIONLOSS));
    expect(retry.canRetry()).andReturn(true);
    retry.useRetry();
    expectLastCall().once();
    retry.waitForNextAttempt(anyObject(), anyString());
    expectLastCall().once();
    zk.delete(path, -1);
    expectLastCall().andThrow(KeeperException.create(Code.NONODE));
    replay(zk, zrw, retryFactory, retry);
    zrw.delete(path);
    verify(zk, zrw, retryFactory, retry);
  }

  /**
   * If the initial create fails with a session expiration and retries are exhausted, the
   * original exception propagates to the caller.
   */
  @Test
  public void testMutateNodeCreationFails() throws Exception {
    final String path = "/foo";
    final byte[] value = {0};
    Mutator mutator = currentValue -> new byte[] {1};
    zk.create(path, value, ZooUtil.PUBLIC, CreateMode.PERSISTENT);
    expectLastCall().andThrow(new SessionExpiredException()).once();
    expect(retry.canRetry()).andReturn(false);
    expect(retry.retriesCompleted()).andReturn(1L).once();
    replay(zk, zrw, retryFactory, retry);
    assertThrows(SessionExpiredException.class, () -> zrw.mutateOrCreate(path, value, mutator));
    verify(zk, zrw, retryFactory, retry);
  }

  /**
   * When the node already exists, mutateOrCreate reads and mutates it; a BadVersionException
   * on setData means a concurrent writer won, so the read-mutate-write cycle repeats
   * (hence getData is expected twice) until setData succeeds.
   */
  @Test
  public void testMutateWithBadVersion() throws Exception {
    final String path = "/foo";
    final byte[] value = {0};
    final byte[] mutatedBytes = {1};
    Mutator mutator = currentValue -> mutatedBytes;
    Stat stat = new Stat();
    zk.create(path, value, ZooUtil.PUBLIC, CreateMode.PERSISTENT);
    expectLastCall().andThrow(new NodeExistsException()).once();
    expect(zk.getData(path, null, stat)).andReturn(new byte[] {3}).times(2);
    // BadVersionException should retry
    expect(zk.setData(path, mutatedBytes, 0)).andThrow(new BadVersionException());
    // Let 2nd setData succeed
    expect(zk.setData(path, mutatedBytes, 0)).andReturn(null);
    retry.waitForNextAttempt(anyObject(), anyObject());
    expectLastCall().once();
    replay(zk, zrw, retryFactory, retry);
    assertArrayEquals(new byte[] {1}, zrw.mutateOrCreate(path, value, mutator));
    verify(zk, zrw, retryFactory, retry);
  }

  /**
   * A transient ConnectionLossException on setData goes through the retry machinery
   * (canRetry/useRetry/waitForNextAttempt) and the operation succeeds on the second attempt.
   */
  @Test
  public void testMutateWithRetryOnSetData() throws Exception {
    final String path = "/foo";
    final byte[] value = {0};
    final byte[] mutatedBytes = {1};
    Mutator mutator = currentValue -> mutatedBytes;
    Stat stat = new Stat();
    zk.create(path, value, ZooUtil.PUBLIC, CreateMode.PERSISTENT);
    expectLastCall().andThrow(new NodeExistsException()).once();
    expect(zk.getData(path, null, stat)).andReturn(new byte[] {3}).times(2);
    // transient connection loss should retry
    expect(zk.setData(path, mutatedBytes, 0)).andThrow(new ConnectionLossException());
    expect(retry.canRetry()).andReturn(true);
    retry.useRetry();
    expectLastCall();
    retry.waitForNextAttempt(anyObject(), anyString());
    expectLastCall();
    // Let 2nd setData succeed
    expect(zk.setData(path, mutatedBytes, 0)).andReturn(null);
    replay(zk, zrw, retryFactory, retry);
    assertArrayEquals(new byte[] {1}, zrw.mutateOrCreate(path, value, mutator));
    verify(zk, zrw, retryFactory, retry);
  }
}
| 9,368 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/zookeeper/ZooUtilTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class ZooUtilTest {

  Logger log = LoggerFactory.getLogger(ZooUtilTest.class);

  /** The ACL constants published by ZooUtil must pass ZooKeeper's own ACL validation. */
  @Test
  void checkUnmodifiable() throws Exception {
    assertTrue(validateACL(ZooUtil.PRIVATE));
    assertTrue(validateACL(ZooUtil.PUBLIC));
  }

  /**
   * Demonstrates why ZooUtil's ACL lists must not be JDK immutable collections: ZooKeeper's
   * validation calls contains(null), which immutable collections reject with an NPE.
   */
  @Test
  public void checkImmutableAcl() throws Exception {
    // A mutable copy validates fine.
    final List<ACL> mutable = new ArrayList<>(ZooDefs.Ids.CREATOR_ALL_ACL);
    assertTrue(validateACL(mutable));
    // Replicates the acl check in ZooKeeper.java to show ZooKeeper will not accept an
    // ImmutableCollection for the ACL list. ZooKeeper (as of 3.8.1) calls
    // acl.contains((Object) null) which throws a NPE when passed an immutable collectionCallers
    // because the way ImmutableCollections.contains() handles nulls (JDK-8265905)
    try {
      final List<ACL> immutable = List.copyOf(ZooDefs.Ids.CREATOR_ALL_ACL);
      assertThrows(NullPointerException.class, () -> validateACL(immutable));
    } catch (Exception ex) {
      log.warn("validateAcls failed with exception", ex);
    }
  }

  // Copied from ZooKeeper 3.8.1 for stand-alone testing here; intentionally kept verbatim so
  // this test tracks the upstream behavior.
  // https://github.com/apache/zookeeper/blob/2e9c3f3ceda90aeb9380acc87b253bf7661b7794/zookeeper-server/src/main/java/org/apache/zookeeper/ZooKeeper.java#L3075/
  private boolean validateACL(List<ACL> acl) throws KeeperException.InvalidACLException {
    if (acl == null || acl.isEmpty() || acl.contains((Object) null)) {
      throw new KeeperException.InvalidACLException();
    }
    return true;
  }
}
| 9,369 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/fate/zookeeper/ZooCacheTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.fate.zookeeper;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.createStrictMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import org.apache.accumulo.core.fate.zookeeper.ZooCache.ZcStat;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.easymock.Capture;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ZooCacheTest {

  // Arbitrary znode path, payload, and child list used by all tests.
  private static final String ZPATH = "/some/path/in/zk";
  private static final byte[] DATA = {(byte) 1, (byte) 2, (byte) 3, (byte) 4};
  private static final List<String> CHILDREN = java.util.Arrays.asList("huey", "dewey", "louie");

  private ZooReader zr;
  private ZooKeeper zk;
  private ZooCache zc;

  @BeforeEach
  public void setUp() {
    zr = createMock(ZooReader.class);
    // Strict mock: the tests assert the exact sequence of calls the cache makes to ZooKeeper;
    // a cache hit that erroneously went back to ZK would fail the mock verification.
    zk = createStrictMock(ZooKeeper.class);
    expect(zr.getZooKeeper()).andReturn(zk);
    expectLastCall().anyTimes();
    replay(zr);
    zc = new ZooCache(zr, null);
  }
  @Test
  public void testGet() throws Exception {
    testGet(false);
  }

  @Test
  public void testGet_FillStat() throws Exception {
    testGet(true);
  }

  /**
   * Fetches ZPATH once (exists + getData on the strict mock), then verifies the value is cached
   * and the second get() is served from the cache without further ZK calls. When fillStat is
   * true, also verifies the ZcStat is populated from the Stat returned by exists().
   */
  private void testGet(boolean fillStat) throws Exception {
    ZcStat myStat = null;
    if (fillStat) {
      myStat = new ZcStat();
    }
    final long ephemeralOwner = 123456789L;
    Stat existsStat = new Stat();
    existsStat.setEphemeralOwner(ephemeralOwner);
    expect(zk.exists(eq(ZPATH), anyObject(Watcher.class))).andReturn(existsStat);
    expect(zk.getData(eq(ZPATH), anyObject(Watcher.class), eq(existsStat))).andReturn(DATA);
    replay(zk);
    assertFalse(zc.dataCached(ZPATH));
    assertArrayEquals(DATA, (fillStat ? zc.get(ZPATH, myStat) : zc.get(ZPATH)));
    verify(zk);
    if (fillStat) {
      assertEquals(ephemeralOwner, myStat.getEphemeralOwner());
    }
    assertTrue(zc.dataCached(ZPATH));
    // Cache hit must return the identical array instance, not a new fetch.
    assertSame(DATA, zc.get(ZPATH)); // cache hit
  }
  /** A node that does not exist is reported as null (and the absence is cached). */
  @Test
  public void testGet_NonExistent() throws Exception {
    expect(zk.exists(eq(ZPATH), anyObject(Watcher.class))).andReturn(null);
    replay(zk);
    assertNull(zc.get(ZPATH));
    verify(zk);
  }

  @Test
  public void testGet_Retry_NoNode() throws Exception {
    testGet_Retry(new KeeperException.NoNodeException(ZPATH));
  }

  @Test
  public void testGet_Retry_ConnectionLoss() throws Exception {
    testGet_Retry(new KeeperException.ConnectionLossException());
  }

  @Test
  public void testGet_Retry_BadVersion() throws Exception {
    testGet_Retry(new KeeperException.BadVersionException(ZPATH));
  }

  @Test
  public void testGet_Retry_Interrupted() throws Exception {
    testGet_Retry(new InterruptedException());
  }

  /**
   * The first exists() call fails with the given exception; get() must retry transparently and
   * return the data from the second attempt.
   */
  private void testGet_Retry(Exception e) throws Exception {
    expect(zk.exists(eq(ZPATH), anyObject(Watcher.class))).andThrow(e);
    Stat existsStat = new Stat();
    expect(zk.exists(eq(ZPATH), anyObject(Watcher.class))).andReturn(existsStat);
    expect(zk.getData(eq(ZPATH), anyObject(Watcher.class), eq(existsStat))).andReturn(DATA);
    replay(zk);
    assertArrayEquals(DATA, zc.get(ZPATH));
    verify(zk);
  }
  @Test
  public void testGet_Retry2_NoNode() throws Exception {
    testGet_Retry2(new KeeperException.NoNodeException(ZPATH));
  }

  @Test
  public void testGet_Retry2_ConnectionLoss() throws Exception {
    testGet_Retry2(new KeeperException.ConnectionLossException());
  }

  @Test
  public void testGet_Retry2_BadVersion() throws Exception {
    testGet_Retry2(new KeeperException.BadVersionException(ZPATH));
  }

  @Test
  public void testGet_Retry2_Interrupted() throws Exception {
    testGet_Retry2(new InterruptedException());
  }

  /**
   * Like testGet_Retry, but the failure happens on getData() after a successful exists();
   * the cache must restart the whole exists+getData sequence and succeed the second time.
   */
  private void testGet_Retry2(Exception e) throws Exception {
    Stat existsStat = new Stat();
    expect(zk.exists(eq(ZPATH), anyObject(Watcher.class))).andReturn(existsStat);
    expect(zk.getData(eq(ZPATH), anyObject(Watcher.class), eq(existsStat))).andThrow(e);
    expect(zk.exists(eq(ZPATH), anyObject(Watcher.class))).andReturn(existsStat);
    expect(zk.getData(eq(ZPATH), anyObject(Watcher.class), eq(existsStat))).andReturn(DATA);
    replay(zk);
    assertArrayEquals(DATA, zc.get(ZPATH));
    verify(zk);
  }
  // --- getChildren caching behavior ---

  @Test
  public void testGetChildren() throws Exception {
    expect(zk.getChildren(eq(ZPATH), anyObject(Watcher.class))).andReturn(CHILDREN);
    replay(zk);
    assertFalse(zc.childrenCached(ZPATH));
    assertEquals(CHILDREN, zc.getChildren(ZPATH));
    verify(zk);
    assertTrue(zc.childrenCached(ZPATH));
    // cannot check for sameness, return value is wrapped each time
    assertEquals(CHILDREN, zc.getChildren(ZPATH)); // cache hit
  }

  /** A null child list from ZK is cached as null; the second call is a cache hit. */
  @Test
  public void testGetChildren_NoKids() throws Exception {
    expect(zk.getChildren(eq(ZPATH), anyObject(Watcher.class))).andReturn(null);
    replay(zk);
    assertNull(zc.getChildren(ZPATH));
    verify(zk);
    assertNull(zc.getChildren(ZPATH)); // cache hit
  }

  /** A transient BadVersionException on getChildren is retried transparently. */
  @Test
  public void testGetChildren_Retry() throws Exception {
    expect(zk.getChildren(eq(ZPATH), anyObject(Watcher.class)))
        .andThrow(new KeeperException.BadVersionException(ZPATH));
    expect(zk.getChildren(eq(ZPATH), anyObject(Watcher.class))).andReturn(CHILDREN);
    replay(zk);
    assertEquals(CHILDREN, zc.getChildren(ZPATH));
    verify(zk);
  }

  /** NoNodeException is not retried: a missing node simply yields null children. */
  @Test
  public void testGetChildren_EatNoNode() throws Exception {
    expect(zk.getChildren(eq(ZPATH), anyObject(Watcher.class)))
        .andThrow(new KeeperException.NoNodeException(ZPATH));
    replay(zk);
    assertNull(zc.getChildren(ZPATH));
    verify(zk);
  }
private static class TestWatcher implements Watcher {
private final WatchedEvent expectedEvent;
private boolean wasCalled;
TestWatcher(WatchedEvent event) {
expectedEvent = event;
wasCalled = false;
}
@Override
public void process(WatchedEvent event) {
assertSame(expectedEvent, event);
wasCalled = true;
}
boolean wasCalled() {
return wasCalled;
}
}
@Test
public void testWatchDataNode_Deleted() throws Exception {
testWatchDataNode(DATA, Watcher.Event.EventType.NodeDeleted, false);
}
@Test
public void testWatchDataNode_DataChanged() throws Exception {
testWatchDataNode(DATA, Watcher.Event.EventType.NodeDataChanged, false);
}
// Creation of a previously-absent node must evict the cached "does not exist" entry.
@Test
public void testWatchDataNode_Created() throws Exception {
testWatchDataNode(null, Watcher.Event.EventType.NodeCreated, false);
}
// A connection-state event (None/SyncConnected) is not a node change: the cache entry survives.
@Test
public void testWatchDataNode_NoneSyncConnected() throws Exception {
testWatchDataNode(null, Watcher.Event.EventType.None, true);
}
// Primes the data cache (initialData == null exercises the "node absent" path),
// fires the given event type at the watcher ZooCache registered, and checks
// whether ZPATH's data is still cached afterwards. Also verifies the external
// watcher passed to ZooCache was forwarded the event.
private void testWatchDataNode(byte[] initialData, Watcher.Event.EventType eventType,
boolean stillCached) throws Exception {
WatchedEvent event =
new WatchedEvent(eventType, Watcher.Event.KeeperState.SyncConnected, ZPATH);
TestWatcher exw = new TestWatcher(event);
zc = new ZooCache(zr, exw);
Watcher w = watchData(initialData);
w.process(event);
assertTrue(exw.wasCalled());
assertEquals(stillCached, zc.dataCached(ZPATH));
}
// Loads ZPATH into the data cache and returns the Watcher that ZooCache
// registered with ZooKeeper (captured via EasyMock), so a test can fire events
// at it directly. A non-null initialData simulates an existing node
// (exists + getData); null simulates a missing node (exists returns null).
private Watcher watchData(byte[] initialData) throws Exception {
Capture<Watcher> cw = EasyMock.newCapture();
Stat existsStat = new Stat();
if (initialData != null) {
expect(zk.exists(eq(ZPATH), capture(cw))).andReturn(existsStat);
expect(zk.getData(eq(ZPATH), anyObject(Watcher.class), eq(existsStat)))
.andReturn(initialData);
} else {
expect(zk.exists(eq(ZPATH), capture(cw))).andReturn(null);
}
replay(zk);
zc.get(ZPATH);
assertTrue(zc.dataCached(ZPATH));
return cw.getValue();
}
// Losing the ZooKeeper connection must clear the cache.
@Test
public void testWatchDataNode_Disconnected() throws Exception {
testWatchDataNode_Clear(Watcher.Event.KeeperState.Disconnected);
}
// Session expiration must clear the cache.
@Test
public void testWatchDataNode_Expired() throws Exception {
testWatchDataNode_Clear(Watcher.Event.KeeperState.Expired);
}
// Fires a connection-level event (EventType.None with the given KeeperState) at
// the registered watcher and asserts the previously-cached data was dropped.
private void testWatchDataNode_Clear(Watcher.Event.KeeperState state) throws Exception {
WatchedEvent event = new WatchedEvent(Watcher.Event.EventType.None, state, null);
TestWatcher exw = new TestWatcher(event);
zc = new ZooCache(zr, exw);
Watcher w = watchData(DATA);
assertTrue(zc.dataCached(ZPATH));
w.process(event);
assertTrue(exw.wasCalled());
assertFalse(zc.dataCached(ZPATH));
}
// Deleting the node must evict its cached child list.
@Test
public void testWatchChildrenNode_Deleted() throws Exception {
testWatchChildrenNode(CHILDREN, Watcher.Event.EventType.NodeDeleted, false);
}
// A children change must evict the stale cached child list.
@Test
public void testWatchChildrenNode_ChildrenChanged() throws Exception {
testWatchChildrenNode(CHILDREN, Watcher.Event.EventType.NodeChildrenChanged, false);
}
// Node creation must evict the cached (null) child list.
@Test
public void testWatchChildrenNode_Created() throws Exception {
testWatchChildrenNode(null, Watcher.Event.EventType.NodeCreated, false);
}
// A connection-state event is not a node change: the cached children survive.
@Test
public void testWatchChildrenNode_NoneSyncConnected() throws Exception {
testWatchChildrenNode(CHILDREN, Watcher.Event.EventType.None, true);
}
// Primes the children cache, fires the given event type at the watcher ZooCache
// registered, and checks whether the child list for ZPATH remains cached.
private void testWatchChildrenNode(List<String> initialChildren,
Watcher.Event.EventType eventType, boolean stillCached) throws Exception {
WatchedEvent event =
new WatchedEvent(eventType, Watcher.Event.KeeperState.SyncConnected, ZPATH);
TestWatcher exw = new TestWatcher(event);
zc = new ZooCache(zr, exw);
Watcher w = watchChildren(initialChildren);
w.process(event);
assertTrue(exw.wasCalled());
assertEquals(stillCached, zc.childrenCached(ZPATH));
}
/**
 * Loads the given child list for ZPATH into the children cache and returns the
 * Watcher that ZooCache registered with ZooKeeper (captured via EasyMock), so a
 * test can deliver events to it directly.
 */
private Watcher watchChildren(List<String> initialChildren) throws Exception {
  Capture<Watcher> watcherCapture = EasyMock.newCapture();
  expect(zk.getChildren(eq(ZPATH), capture(watcherCapture))).andReturn(initialChildren);
  replay(zk);
  zc.getChildren(ZPATH);
  assertTrue(zc.childrenCached(ZPATH));
  return watcherCapture.getValue();
}
}
| 9,370 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/compaction/DefaultCompactionPlannerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.compaction;
import static com.google.common.collect.MoreCollectors.onlyElement;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.common.ServiceEnvironment.Configuration;
import org.apache.accumulo.core.spi.compaction.CompactionPlan.Builder;
import org.apache.accumulo.core.util.compaction.CompactionExecutorIdImpl;
import org.apache.accumulo.core.util.compaction.CompactionPlanImpl;
import org.easymock.EasyMock;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@link DefaultCompactionPlanner}: file-selection logic
 * (findDataFilesToCompact), plan generation for SYSTEM and USER compactions, executor
 * size routing, and configuration validation errors.
 */
public class DefaultCompactionPlannerTest {

  private static <T> T getOnlyElement(Collection<T> c) {
    return c.stream().collect(onlyElement());
  }

  /**
   * Exercises findDataFilesToCompact across many file-size mixes and ratios. Each case
   * states the expected subset (possibly empty) for a candidate set, ratio, and
   * optionally a max file count and max total size.
   */
  @Test
  public void testFindFilesToCompact() throws Exception {
    testFFtC(createCFs("F4", "1M", "F5", "1M", "F6", "1M"),
        createCFs("F1", "100M", "F2", "100M", "F3", "100M", "F4", "1M", "F5", "1M", "F6", "1M"),
        2.0);
    testFFtC(createCFs("F1", "100M", "F2", "100M", "F3", "100M", "F4", "1M"), 2.0);
    testFFtC(
        createCFs("F1", "100M", "F2", "99M", "F3", "33M", "F4", "33M", "F5", "33M", "F6", "33M"),
        2.0);
    testFFtC(
        createCFs("F1", "100M", "F2", "99M", "F3", "33M", "F4", "33M", "F5", "33M", "F6", "33M"),
        3.0);
    testFFtC(createCFs("F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
        createCFs("F1", "50M", "F2", "49M", "F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
        2.0);
    testFFtC(createCFs("F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
        createCFs("F1", "50M", "F2", "49M", "F3", "10M", "F4", "10M", "F5", "10M", "F6", "10M"),
        3.0);
    testFFtC(createCFs("S1", "1M", "S2", "1M", "S3", "1M", "S4", "1M"),
        createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "M1", "10M", "M2", "10M",
            "M3", "10M", "M4", "10M", "S1", "1M", "S2", "1M", "S3", "1M", "S4", "1M"),
        3.0);
    testFFtC(createCFs("M1", "10M", "M2", "10M", "M3", "10M", "M4", "10M", "C1", "4M"),
        createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "M1", "10M", "M2", "10M",
            "M3", "10M", "M4", "10M", "C1", "4M"),
        3.0);
    testFFtC(createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "C2", "44M"),
        createCFs("B1", "100M", "B2", "100M", "B3", "100M", "B4", "100M", "C2", "44M"), 3.0);
    testFFtC(createCFs(), createCFs("C3", "444M"), 3.0);
    testFFtC(createCFs(), createCFs("A1", "17M", "S1", "11M", "S2", "11M", "S3", "11M"), 3.0);
    testFFtC(createCFs("A1", "16M", "S1", "11M", "S2", "11M", "S3", "11M"), 3.0);
    testFFtC(
        createCFs("A1", "1M", "A2", "1M", "A3", "1M", "A4", "1M", "A5", "3M", "A6", "3M", "A7",
            "5M", "A8", "5M"),
        createCFs("A1", "1M", "A2", "1M", "A3", "1M", "A4", "1M", "A5", "3M", "A6", "3M", "A7",
            "5M", "A8", "5M", "A9", "100M", "A10", "100M", "A11", "100M", "A12", "500M"),
        3.0);
    testFFtC(
        createCFs("F1", "100M", "F2", "99M", "F3", "33M", "F4", "33M", "F5", "33M", "F6", "33M"),
        3.0);
    testFFtC(createCFs("F3", "10M", "F4", "9M", "F5", "8M", "F6", "7M"),
        createCFs("F1", "12M", "F2", "11M", "F3", "10M", "F4", "9M", "F5", "8M", "F6", "7M"), 3.0,
        4);
    testFFtC(createCFs("F3", "4M", "F4", "8M", "F5", "9M", "F6", "10M"),
        createCFs("F1", "1M", "F2", "2M", "F3", "4M", "F4", "8M", "F5", "9M", "F6", "10M"), 3.0, 4);
    testFFtC(createCFs(),
        createCFs("F1", "1M", "F2", "2M", "F3", "4M", "F4", "8M", "F5", "16M", "F6", "32M"), 3.0,
        4);
    testFFtC(createCFs(), createCFs("F1", "200M", "F2", "200M", "F3", "200M", "F4", "200M", "F5",
        "200M", "F6", "200M"), 3.0, 4, 100_000_000L);
    testFFtC(createCFs("F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M"),
        createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"), 3.0,
        4, 100_000_000L);
    testFFtC(createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M"),
        createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"), 3.0,
        8, 100_000_000L);
    testFFtC(createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"),
        createCFs("F1", "1M", "F2", "2M", "F3", "30M", "F4", "30M", "F5", "30M", "F6", "30M"), 3.0,
        8, 200_000_000L);
  }

  /**
   * A SYSTEM compaction should be deferred while a running compaction could feed its
   * output into a future compaction of the candidates, and proceed otherwise.
   */
  @Test
  public void testRunningCompaction() throws Exception {
    var planner = createPlanner(true);
    var all = createCFs("F1", "3M", "F2", "3M", "F3", "11M", "F4", "12M", "F5", "13M");
    var candidates = createCFs("F3", "11M", "F4", "12M", "F5", "13M");
    var compacting =
        Set.of(createJob(CompactionKind.SYSTEM, all, createCFs("F1", "3M", "F2", "3M")));
    var params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.SYSTEM);
    var plan = planner.makePlan(params);
    // The result of the running compaction could be included in a future compaction, so the planner
    // should wait.
    assertTrue(plan.getJobs().isEmpty());
    all = createCFs("F1", "30M", "F2", "30M", "F3", "11M", "F4", "12M", "F5", "13M");
    candidates = createCFs("F3", "11M", "F4", "12M", "F5", "13M");
    compacting = Set.of(createJob(CompactionKind.SYSTEM, all, createCFs("F1", "30M", "F2", "30M")));
    params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.SYSTEM);
    plan = planner.makePlan(params);
    // The result of the running compaction would not be included in future compactions, so the
    // planner should compact.
    var job = getOnlyElement(plan.getJobs());
    assertEquals(candidates, job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("medium"), job.getExecutor());
  }

  /**
   * USER compaction rules: may run alongside non-user compactions, only one at a time,
   * respects maxOpen by compacting in stages, and may pick a ratio-meeting subset.
   */
  @Test
  public void testUserCompaction() throws Exception {
    var planner = createPlanner(true);
    var all = createCFs("F1", "3M", "F2", "3M", "F3", "11M", "F4", "12M", "F5", "13M");
    var candidates = createCFs("F3", "11M", "F4", "12M", "F5", "13M");
    var compacting =
        Set.of(createJob(CompactionKind.SYSTEM, all, createCFs("F1", "3M", "F2", "3M")));
    var params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.USER);
    var plan = planner.makePlan(params);
    // a running non-user compaction should not prevent a user compaction
    var job = getOnlyElement(plan.getJobs());
    assertEquals(candidates, job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("medium"), job.getExecutor());
    // should only run one user compaction at a time
    compacting = Set.of(createJob(CompactionKind.USER, all, createCFs("F1", "3M", "F2", "3M")));
    params = createPlanningParams(all, candidates, compacting, 2, CompactionKind.USER);
    plan = planner.makePlan(params);
    assertTrue(plan.getJobs().isEmpty());
    // 17 files that do not meet the compaction ratio, when max files to compact is 15 should do 3
    // files then 15
    all = createCFs("F1", "1M", "F2", "2M", "F3", "4M", "F4", "8M", "F5", "16M", "F6", "32M", "F7",
        "64M", "F8", "128M", "F9", "256M", "FA", "512M", "FB", "1G", "FC", "2G", "FD", "4G", "FE",
        "8G", "FF", "16G", "FG", "32G", "FH", "64G");
    compacting = Set.of();
    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
    plan = planner.makePlan(params);
    job = getOnlyElement(plan.getJobs());
    assertEquals(createCFs("F1", "1M", "F2", "2M", "F3", "4M"), job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("small"), job.getExecutor());
    // should compact all 15
    all = createCFs("FI", "7M", "F4", "8M", "F5", "16M", "F6", "32M", "F7", "64M", "F8", "128M",
        "F9", "256M", "FA", "512M", "FB", "1G", "FC", "2G", "FD", "4G", "FE", "8G", "FF", "16G",
        "FG", "32G", "FH", "64G");
    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
    plan = planner.makePlan(params);
    job = getOnlyElement(plan.getJobs());
    assertEquals(all, job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("huge"), job.getExecutor());
    // For user compaction, can compact a subset that meets the compaction ratio if there is also a
    // larger set of files the meets the compaction ratio
    all = createCFs("F1", "3M", "F2", "4M", "F3", "5M", "F4", "6M", "F5", "50M", "F6", "51M", "F7",
        "52M");
    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
    plan = planner.makePlan(params);
    job = getOnlyElement(plan.getJobs());
    assertEquals(createCFs("F1", "3M", "F2", "4M", "F3", "5M", "F4", "6M"), job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("small"), job.getExecutor());
    // There is a subset of small files that meets the compaction ratio, but the larger set does not
    // so compact everything to avoid doing more than logarithmic work
    all = createCFs("F1", "3M", "F2", "4M", "F3", "5M", "F4", "6M", "F5", "50M");
    params = createPlanningParams(all, all, compacting, 2, CompactionKind.USER);
    plan = planner.makePlan(params);
    job = getOnlyElement(plan.getJobs());
    assertEquals(all, job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("medium"), job.getExecutor());
  }

  /**
   * SYSTEM compactions must respect the largest executor's max size; USER compactions
   * may exceed it.
   */
  @Test
  public void testMaxSize() throws Exception {
    var planner = createPlanner(false);
    var all = createCFs("F1", "128M", "F2", "129M", "F3", "130M", "F4", "131M", "F5", "132M");
    var params = createPlanningParams(all, all, Set.of(), 2, CompactionKind.SYSTEM);
    var plan = planner.makePlan(params);
    // should only compact files less than max size
    var job = getOnlyElement(plan.getJobs());
    assertEquals(createCFs("F1", "128M", "F2", "129M", "F3", "130M"), job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("large"), job.getExecutor());
    // user compaction can exceed the max size
    params = createPlanningParams(all, all, Set.of(), 2, CompactionKind.USER);
    plan = planner.makePlan(params);
    job = getOnlyElement(plan.getJobs());
    assertEquals(all, job.getFiles());
    assertEquals(CompactionExecutorIdImpl.externalId("large"), job.getExecutor());
  }

  /**
   * Tests internal type executor with no numThreads set throws error
   */
  @Test
  public void testErrorInternalTypeNoNumThreads() {
    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
    ServiceEnvironment senv = createMockServiceEnvironment();
    String executors = getExecutors("'type': 'internal','maxSize':'32M'",
        "'type': 'internal','maxSize':'128M','numThreads':2",
        "'type': 'internal','maxSize':'512M','numThreads':3");
    var e = assertThrows(NullPointerException.class,
        () -> planner.init(getInitParams(senv, executors)), "Failed to throw error");
    assertTrue(e.getMessage().contains("numThreads"), "Error message didn't contain numThreads");
  }

  /**
   * Test external type executor with numThreads set throws error.
   */
  @Test
  public void testErrorExternalTypeNumThreads() {
    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
    ServiceEnvironment senv = createMockServiceEnvironment();
    String executors = getExecutors("'type': 'internal','maxSize':'32M','numThreads':1",
        "'type': 'internal','maxSize':'128M','numThreads':2",
        "'type': 'external','maxSize':'512M','numThreads':3");
    var e = assertThrows(IllegalArgumentException.class,
        () -> planner.init(getInitParams(senv, executors)), "Failed to throw error");
    assertTrue(e.getMessage().contains("numThreads"), "Error message didn't contain numThreads");
  }

  /**
   * Tests external type executor missing queue throws error
   */
  @Test
  public void testErrorExternalNoQueue() {
    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
    ServiceEnvironment senv = createMockServiceEnvironment();
    String executors = getExecutors("'type': 'internal','maxSize':'32M','numThreads':1",
        "'type': 'internal','maxSize':'128M','numThreads':2",
        "'type': 'external','maxSize':'512M'");
    var e = assertThrows(NullPointerException.class,
        () -> planner.init(getInitParams(senv, executors)), "Failed to throw error");
    assertTrue(e.getMessage().contains("queue"), "Error message didn't contain queue");
  }

  /**
   * Tests executors can only have one without a max size.
   */
  @Test
  public void testErrorOnlyOneMaxSize() {
    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
    ServiceEnvironment senv = createMockServiceEnvironment();
    String executors = getExecutors("'type': 'internal','maxSize':'32M','numThreads':1",
        "'type': 'internal','numThreads':2", "'type': 'external','queue':'q1'");
    var e = assertThrows(IllegalArgumentException.class,
        () -> planner.init(getInitParams(senv, executors)), "Failed to throw error");
    assertTrue(e.getMessage().contains("maxSize"), "Error message didn't contain maxSize");
  }

  /**
   * Tests executors cannot have duplicate max sizes.
   */
  @Test
  public void testErrorDuplicateMaxSize() {
    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
    ServiceEnvironment senv = createMockServiceEnvironment();
    String executors = getExecutors("'type': 'internal','maxSize':'32M','numThreads':1",
        "'type': 'internal','maxSize':'128M','numThreads':2",
        "'type': 'external','maxSize':'128M','queue':'q1'");
    var e = assertThrows(IllegalArgumentException.class,
        () -> planner.init(getInitParams(senv, executors)), "Failed to throw error");
    assertTrue(e.getMessage().contains("maxSize"), "Error message didn't contain maxSize");
  }

  /**
   * Builds a replayed EasyMock ServiceEnvironment whose Configuration reports no
   * property as set. Shared by the error-path tests and createPlanner() to avoid
   * repeating the same mock setup.
   */
  private static ServiceEnvironment createMockServiceEnvironment() {
    Configuration conf = EasyMock.createMock(Configuration.class);
    EasyMock.expect(conf.isSet(EasyMock.anyString())).andReturn(false).anyTimes();
    ServiceEnvironment senv = EasyMock.createMock(ServiceEnvironment.class);
    EasyMock.expect(senv.getConfiguration()).andReturn(conf).anyTimes();
    EasyMock.replay(conf, senv);
    return senv;
  }

  /**
   * InitParameters wrapper exposing the given executor config (and maxOpen=15) with a
   * pass-through ExecutorManager.
   */
  private CompactionPlanner.InitParameters getInitParams(ServiceEnvironment senv,
      String executors) {
    return new CompactionPlanner.InitParameters() {
      @Override
      public ServiceEnvironment getServiceEnvironment() {
        return senv;
      }

      @Override
      public Map<String,String> getOptions() {
        return Map.of("executors", executors, "maxOpen", "15");
      }

      @Override
      public String getFullyQualifiedOption(String key) {
        assertEquals("maxOpen", key);
        return Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner.opts." + key;
      }

      @Override
      public ExecutorManager getExecutorManager() {
        return new ExecutorManager() {
          @Override
          public CompactionExecutorId createExecutor(String name, int threads) {
            return CompactionExecutorIdImpl.externalId(name);
          }

          @Override
          public CompactionExecutorId getExternalExecutor(String name) {
            return CompactionExecutorIdImpl.externalId(name);
          }
        };
      }
    };
  }

  /**
   * Builds the JSON executors config (names small/medium/large) from the given per-executor
   * attribute fragments; single quotes are converted to double quotes for readability.
   */
  private String getExecutors(String small, String medium, String large) {
    String execBldr = "[{'name':'small'," + small + "},{'name':'medium'," + medium + "},"
        + "{'name':'large'," + large + "}]";
    // plain character replacement; no regex semantics needed
    return execBldr.replace("'", "\"");
  }

  /** Creates a CompactionJob of the given kind over {@code files}, priority = file count. */
  private CompactionJob createJob(CompactionKind kind, Set<CompactableFile> all,
      Set<CompactableFile> files) {
    return new CompactionPlanImpl.BuilderImpl(kind, all, all)
        .addJob((short) all.size(), CompactionExecutorIdImpl.externalId("small"), files).build()
        .getJobs().iterator().next();
  }

  /**
   * Creates CompactableFiles from (name, size) pairs; sizes use memory suffixes like "1M".
   */
  private static Set<CompactableFile> createCFs(String... namesSizePairs)
      throws URISyntaxException {
    Set<CompactableFile> files = new HashSet<>();
    for (int i = 0; i < namesSizePairs.length; i += 2) {
      String name = namesSizePairs[i];
      long size = ConfigurationTypeHelper.getFixedMemoryAsBytes(namesSizePairs[i + 1]);
      files.add(CompactableFile
          .create(new URI("hdfs://fake/accumulo/tables/1/t-0000000z/" + name + ".rf"), size, 0));
    }
    return files;
  }

  // Convenience overloads: default maxFiles=100 and maxSize=unlimited.
  private static void testFFtC(Set<CompactableFile> expected, double ratio) {
    testFFtC(expected, expected, ratio, 100);
  }

  private static void testFFtC(Set<CompactableFile> expected, Set<CompactableFile> files,
      double ratio) {
    testFFtC(expected, files, ratio, 100);
  }

  private static void testFFtC(Set<CompactableFile> expected, Set<CompactableFile> files,
      double ratio, int maxFiles) {
    testFFtC(expected, files, ratio, maxFiles, Long.MAX_VALUE);
  }

  /**
   * Runs findDataFilesToCompact and compares result vs expected by file name (the URI
   * path's last segment), since CompactableFile equality is not what is under test.
   */
  private static void testFFtC(Set<CompactableFile> expected, Set<CompactableFile> files,
      double ratio, int maxFiles, long maxSize) {
    var result = DefaultCompactionPlanner.findDataFilesToCompact(files, ratio, maxFiles, maxSize);
    var expectedNames = expected.stream().map(CompactableFile::getUri).map(URI::getPath)
        .map(path -> path.split("/")).map(t -> t[t.length - 1]).collect(Collectors.toSet());
    var resultNames = result.stream().map(CompactableFile::getUri).map(URI::getPath)
        .map(path -> path.split("/")).map(t -> t[t.length - 1]).collect(Collectors.toSet());
    assertEquals(expectedNames, resultNames);
  }

  /**
   * PlanningParameters stub over the given file sets, running jobs, ratio, and kind;
   * execution hints are empty and the service environment is unsupported.
   */
  private static CompactionPlanner.PlanningParameters createPlanningParams(Set<CompactableFile> all,
      Set<CompactableFile> candidates, Set<CompactionJob> compacting, double ratio,
      CompactionKind kind) {
    return new CompactionPlanner.PlanningParameters() {
      @Override
      public TableId getTableId() {
        return TableId.of("42");
      }

      @Override
      public ServiceEnvironment getServiceEnvironment() {
        throw new UnsupportedOperationException();
      }

      @Override
      public Collection<CompactionJob> getRunningCompactions() {
        return compacting;
      }

      @Override
      public double getRatio() {
        return ratio;
      }

      @Override
      public CompactionKind getKind() {
        return kind;
      }

      @Override
      public Map<String,String> getExecutionHints() {
        return Map.of();
      }

      @Override
      public Collection<CompactableFile> getCandidates() {
        return candidates;
      }

      @Override
      public Collection<CompactableFile> getAll() {
        return all;
      }

      @Override
      public Builder createPlanBuilder() {
        return new CompactionPlanImpl.BuilderImpl(kind, all, candidates);
      }
    };
  }

  /**
   * Creates an initialized planner with small/medium/large internal executors (32M/128M/512M,
   * 1/2/3 threads) and optionally an unbounded "huge" executor with 4 threads. The
   * ExecutorManager asserts the expected thread count per executor name.
   */
  private static DefaultCompactionPlanner createPlanner(boolean withHugeExecutor) {
    DefaultCompactionPlanner planner = new DefaultCompactionPlanner();
    ServiceEnvironment senv = createMockServiceEnvironment();
    StringBuilder execBldr =
        new StringBuilder("[{'name':'small','type': 'internal','maxSize':'32M','numThreads':1},"
            + "{'name':'medium','type': 'internal','maxSize':'128M','numThreads':2},"
            + "{'name':'large','type': 'internal','maxSize':'512M','numThreads':3}");
    if (withHugeExecutor) {
      execBldr.append(",{'name':'huge','type': 'internal','numThreads':4}]");
    } else {
      execBldr.append("]");
    }
    String executors = execBldr.toString().replace("'", "\"");
    planner.init(new CompactionPlanner.InitParameters() {
      @Override
      public ServiceEnvironment getServiceEnvironment() {
        return senv;
      }

      @Override
      public Map<String,String> getOptions() {
        return Map.of("executors", executors, "maxOpen", "15");
      }

      @Override
      public String getFullyQualifiedOption(String key) {
        assertEquals("maxOpen", key);
        return Property.TSERV_COMPACTION_SERVICE_PREFIX.getKey() + "cs1.planner.opts." + key;
      }

      @Override
      public ExecutorManager getExecutorManager() {
        return new ExecutorManager() {
          @Override
          public CompactionExecutorId createExecutor(String name, int threads) {
            // verify the planner requested the configured thread count for each executor
            switch (name) {
              case "small":
                assertEquals(1, threads);
                break;
              case "medium":
                assertEquals(2, threads);
                break;
              case "large":
                assertEquals(3, threads);
                break;
              case "huge":
                assertEquals(4, threads);
                break;
              default:
                fail("Unexpected name " + name);
                break;
            }
            return CompactionExecutorIdImpl.externalId(name);
          }

          @Override
          public CompactionExecutorId getExternalExecutor(String name) {
            throw new UnsupportedOperationException();
          }
        };
      }
    });
    return planner;
  }
}
| 9,371 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/scan/HintScanPrioritizerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.scan.ScanInfo.Type;
import org.junit.jupiter.api.Test;
/**
 * Verifies that {@link HintScanPrioritizer} orders scans by hint-derived priority
 * (lower value first), breaking ties by age (oldest first).
 */
public class HintScanPrioritizerTest {
  @Test
  public void testSort() {
    long now = System.currentTimeMillis();
    List<TestScanInfo> scans = new ArrayList<>();
    // Two following have never run, so oldest should go first
    scans.add(new TestScanInfo("a", Type.SINGLE, now - 7));
    scans.add(
        new TestScanInfo("b", Type.SINGLE, now - 3).setExecutionHints("scan_type", "background"));
    scans.add(
        new TestScanInfo("c", Type.SINGLE, now - 4).setExecutionHints("scan_type", "background"));
    scans.add(new TestScanInfo("d", Type.SINGLE, now - 3).setExecutionHints("scan_type", "isbn"));
    scans.add(new TestScanInfo("e", Type.SINGLE, now - 5).setExecutionHints("scan_type", "isbn"));
    scans.add(new TestScanInfo("f", Type.SINGLE, now - 1).setExecutionHints("priority", "35"));
    scans.add(new TestScanInfo("g", Type.SINGLE, now - 2).setExecutionHints("priority", "25"));
    scans.add(new TestScanInfo("h", Type.SINGLE, now - 3).setExecutionHints("priority", "15"));
    scans.add(new TestScanInfo("i", Type.SINGLE, now - 4).setExecutionHints("priority", "5"));
    // randomize input order so the result depends only on the comparator
    Collections.shuffle(scans);
    Comparator<ScanInfo> prioritizer =
        new HintScanPrioritizer().createComparator(new ScanPrioritizer.CreateParameters() {
          @Override
          public Map<String,String> getOptions() {
            // isbn -> 10, background -> 30, anything else -> 20
            return Map.of("priority.isbn", "10", "priority.background", "30", "default_priority",
                "20");
          }

          @Override
          public ServiceEnvironment getServiceEnv() {
            throw new UnsupportedOperationException();
          }
        });
    scans.sort(prioritizer);
    // priority ascending, then oldest first within equal priority
    String[] expectedOrder = {"i", "e", "d", "h", "a", "g", "c", "b", "f"};
    for (int idx = 0; idx < expectedOrder.length; idx++) {
      assertEquals(expectedOrder[idx], scans.get(idx).testId);
    }
  }
}
| 9,372 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/scan/ConfigurableScanServerSelectorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.time.Duration;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class ConfigurableScanServerSelectorTest {
// InitParameters stub for the selector. Scan servers are kept as a map of
// address -> group name; the Set-based constructors place every server in the
// default group.
static class InitParams implements ScanServerSelector.InitParameters {
private final Map<String,String> opts;
private final Map<String,String> scanServers;
InitParams(Set<String> scanServers) {
this(scanServers, Map.of());
}
// All listed servers are assigned the default scan server group.
InitParams(Set<String> scanServers, Map<String,String> opts) {
this.opts = opts;
this.scanServers = new HashMap<>();
scanServers.forEach(
sserv -> this.scanServers.put(sserv, ScanServerSelector.DEFAULT_SCAN_SERVER_GROUP_NAME));
}
// Explicit address -> group assignments.
InitParams(Map<String,String> scanServers, Map<String,String> opts) {
this.opts = opts;
this.scanServers = scanServers;
}
@Override
public Map<String,String> getOptions() {
return opts;
}
@Override
public ServiceEnvironment getServiceEnv() {
throw new UnsupportedOperationException();
}
@Override
public Supplier<Collection<ScanServerInfo>> getScanServers() {
// adapt each map entry to a ScanServerInfo view
return () -> scanServers.entrySet().stream().map(entry -> new ScanServerInfo() {
@Override
public String getAddress() {
return entry.getKey();
}
@Override
public String getGroup() {
return entry.getValue();
}
}).collect(Collectors.toSet());
}
}
// SelectorParameters stub for driving selectServers() with a single tablet,
// optional prior-attempt history, and optional execution hints.
static class DaParams implements ScanServerSelector.SelectorParameters {
private final Collection<TabletId> tablets;
private final Map<TabletId,Collection<? extends ScanServerAttempt>> attempts;
private final Map<String,String> hints;
// First attempt for the tablet: no history, no hints.
DaParams(TabletId tablet) {
this.tablets = Set.of(tablet);
this.attempts = Map.of();
this.hints = Map.of();
}
DaParams(TabletId tablet, Map<TabletId,Collection<? extends ScanServerAttempt>> attempts,
Map<String,String> hints) {
this.tablets = Set.of(tablet);
this.attempts = attempts;
this.hints = hints;
}
@Override
public Collection<TabletId> getTablets() {
return tablets;
}
@Override
public Collection<? extends ScanServerAttempt> getAttempts(TabletId tabletId) {
// tablets with no recorded attempts report an empty history
return attempts.getOrDefault(tabletId, Set.of());
}
@Override
public Map<String,String> getHints() {
return hints;
}
}
/**
 * Immutable ScanServerAttempt used to feed prior-attempt history (server address plus
 * outcome) into the selector under test.
 */
static class TestScanServerAttempt implements ScanServerAttempt {
  private final String serverAddress;
  private final Result outcome;

  TestScanServerAttempt(String server, Result result) {
    this.serverAddress = server;
    this.outcome = result;
  }

  @Override
  public String getServer() {
    return serverAddress;
  }

  @Override
  public Result getResult() {
    return outcome;
  }
}
/**
 * Builds a TabletId ("new tablet id") for the given table whose extent ends at
 * {@code endRow} (null for the default/last tablet) and has a null previous end row.
 */
public static TabletId nti(String tableId, String endRow) {
  Text end = endRow == null ? null : new Text(endRow);
  return new TabletIdImpl(new KeyExtent(TableId.of(tableId), end, null));
}
/**
 * With the default configuration and eight scan servers, repeated selections for the
 * same tablet should land on exactly three distinct servers.
 */
@Test
public void testBasic() {
  ConfigurableScanServerSelector selector = new ConfigurableScanServerSelector();
  selector.init(new InitParams(
      Set.of("ss1:1", "ss2:2", "ss3:3", "ss4:4", "ss5:5", "ss6:6", "ss7:7", "ss8:8")));
  Set<String> chosenServers = new HashSet<>();
  for (int attempt = 0; attempt < 100; attempt++) {
    // fresh TabletId each iteration, as a real caller would supply
    var tablet = nti("1", "m");
    ScanServerSelections selections = selector.selectServers(new DaParams(tablet));
    chosenServers.add(selections.getScanServer(tablet));
  }
  assertEquals(3, chosenServers.size());
}
  // Convenience overload: no selector options and no execution hints.
  private void runBusyTest(int numServers, int busyAttempts, int expectedServers,
      long expectedBusyTimeout) {
    runBusyTest(numServers, busyAttempts, expectedServers, expectedBusyTimeout, Map.of());
  }
  // Convenience overload: selector options, but no execution hints.
  private void runBusyTest(int numServers, int busyAttempts, int expectedServers,
      long expectedBusyTimeout, Map<String,String> opts) {
    runBusyTest(numServers, busyAttempts, expectedServers, expectedBusyTimeout, opts, Map.of());
  }
private void runBusyTest(int numServers, int busyAttempts, int expectedServers,
long expectedBusyTimeout, Map<String,String> opts, Map<String,String> hints) {
ConfigurableScanServerSelector selector = new ConfigurableScanServerSelector();
var servers = Stream.iterate(1, i -> i <= numServers, i -> i + 1).map(i -> "s" + i + ":" + i)
.collect(Collectors.toSet());
selector.init(new InitParams(servers, opts));
Set<String> serversSeen = new HashSet<>();
var tabletId = nti("1", "m");
var tabletAttempts = Stream.iterate(1, i -> i <= busyAttempts, i -> i + 1)
.map(i -> (new TestScanServerAttempt("ss" + i + ":" + i, ScanServerAttempt.Result.BUSY)))
.collect(Collectors.toList());
Map<TabletId,Collection<? extends ScanServerAttempt>> attempts = new HashMap<>();
attempts.put(tabletId, tabletAttempts);
for (int i = 0; i < 100 * numServers; i++) {
ScanServerSelections actions =
selector.selectServers(new DaParams(tabletId, attempts, hints));
assertEquals(expectedBusyTimeout, actions.getBusyTimeout().toMillis());
assertEquals(0, actions.getDelay().toMillis());
serversSeen.add(actions.getScanServer(tabletId));
}
assertEquals(expectedServers, serversSeen.size());
}
  @Test
  public void testBusy() {
    // With 1000 servers and default config: attempt plans widen the candidate pool as BUSY
    // attempts accumulate (3 -> 13 -> all servers), then the busy timeout grows by the 8x
    // multiplier per additional attempt, capped at 300s.
    runBusyTest(1000, 0, 3, 33);
    runBusyTest(1000, 1, 13, 33);
    runBusyTest(1000, 2, 1000, 33);
    runBusyTest(1000, 3, 1000, 33 * 8);
    runBusyTest(1000, 4, 1000, 33 * 8 * 8);
    runBusyTest(1000, 6, 1000, 33 * 8 * 8 * 8 * 8);
    runBusyTest(1000, 7, 1000, 300000);
    runBusyTest(1000, 20, 1000, 300000);
    // Fewer servers than the widest plan level: the pool saturates at the server count.
    runBusyTest(27, 0, 3, 33);
    runBusyTest(27, 1, 13, 33);
    runBusyTest(27, 2, 27, 33);
    runBusyTest(27, 3, 27, 33 * 8);
    runBusyTest(6, 0, 3, 33);
    runBusyTest(6, 1, 6, 33);
    runBusyTest(6, 2, 6, 33);
    runBusyTest(6, 3, 6, 33 * 8);
    // Tiny clusters: all servers are always candidates regardless of attempts.
    for (int i = 0; i < 2; i++) {
      runBusyTest(1, i, 1, 33);
      runBusyTest(2, i, 2, 33);
      runBusyTest(3, i, 3, 33);
    }
  }
@Test
public void testCoverage() {
ConfigurableScanServerSelector selector = new ConfigurableScanServerSelector();
var servers = Stream.iterate(1, i -> i <= 20, i -> i + 1).map(i -> "s" + i + ":" + i)
.collect(Collectors.toSet());
selector.init(new InitParams(servers));
Map<String,Long> allServersSeen = new HashMap<>();
for (int t = 0; t < 10000; t++) {
Set<String> serversSeen = new HashSet<>();
String endRow =
Long.toString(Math.abs(Math.max(RANDOM.get().nextLong(), Long.MIN_VALUE + 1)), 36);
var tabletId = t % 1000 == 0 ? nti("" + t, null) : nti("" + t, endRow);
for (int i = 0; i < 100; i++) {
ScanServerSelections actions = selector.selectServers(new DaParams(tabletId));
serversSeen.add(actions.getScanServer(tabletId));
allServersSeen.merge(actions.getScanServer(tabletId), 1L, Long::sum);
}
assertEquals(3, serversSeen.size());
}
assertEquals(20, allServersSeen.size());
var stats = allServersSeen.values().stream().mapToLong(l -> l - 50000).summaryStatistics();
assertTrue(stats.getMin() > -5000 && stats.getMax() < 5000);
}
@Test
public void testOpts() {
String defaultProfile =
"{'isDefault':true,'maxBusyTimeout':'5m','busyTimeoutMultiplier':4, 'attemptPlans':"
+ "[{'servers':'5', 'busyTimeout':'5ms'},{'servers':'20', 'busyTimeout':'33ms'},"
+ "{'servers':'50%', 'busyTimeout':'100ms'},{'servers':'100%', 'busyTimeout':'200ms'}]}";
String profile1 = "{'scanTypeActivations':['long','st9'],'maxBusyTimeout':'30m',"
+ "'busyTimeoutMultiplier':4, 'attemptPlans':[{'servers':'2', 'busyTimeout':'10s'},"
+ "{'servers':'4', 'busyTimeout':'2m'},{'servers':'10%', 'busyTimeout':'5m'}]}";
String profile2 =
"{'scanTypeActivations':['mega'],'maxBusyTimeout':'60m','busyTimeoutMultiplier':2, "
+ "'attemptPlans':[{'servers':'100%', 'busyTimeout':'10m'}]}";
// Intentionally put the default profile in 2nd position. There was a bug where config parsing
// would fail if the default did not come first.
var opts = Map.of("profiles",
"[" + profile1 + ", " + defaultProfile + "," + profile2 + "]".replace('\'', '"'));
runBusyTest(1000, 0, 5, 5, opts);
runBusyTest(1000, 1, 20, 33, opts);
runBusyTest(1000, 2, 500, 100, opts);
runBusyTest(1000, 3, 1000, 200, opts);
runBusyTest(1000, 4, 1000, 200 * 4, opts);
runBusyTest(1000, 5, 1000, 200 * 4 * 4, opts);
runBusyTest(1000, 8, 1000, 200 * 4 * 4 * 4 * 4 * 4, opts);
runBusyTest(1000, 9, 1000, 300000, opts);
runBusyTest(1000, 10, 1000, 300000, opts);
var hints = Map.of("scan_type", "long");
runBusyTest(1000, 0, 2, 10000, opts, hints);
runBusyTest(1000, 1, 4, 120000, opts, hints);
runBusyTest(1000, 2, 100, 300000, opts, hints);
runBusyTest(1000, 3, 100, 1200000, opts, hints);
runBusyTest(1000, 4, 100, 1800000, opts, hints);
runBusyTest(1000, 50, 100, 1800000, opts, hints);
hints = Map.of("scan_type", "st9");
runBusyTest(1000, 0, 2, 10000, opts, hints);
hints = Map.of("scan_type", "mega");
runBusyTest(1000, 0, 1000, 600000, opts, hints);
runBusyTest(1000, 1, 1000, 1200000, opts, hints);
// test case where no profile is activated by a scan_type, so should use default profile
hints = Map.of("scan_type", "st7");
runBusyTest(1000, 0, 5, 5, opts, hints);
}
@Test
public void testUnknownOpts() {
var opts = Map.of("abc", "3");
var exception =
assertThrows(IllegalArgumentException.class, () -> runBusyTest(1000, 0, 5, 66, opts));
assertTrue(exception.getMessage().contains("abc"));
}
@Test
public void testIncorrectOptions() {
String defaultProfile = "{'isDefault':true,'maxBusyTimeout':'5m','busyTimeoutMultiplier':4, "
+ "'attemptPlans':[{'servers':'5', 'busyTimeout':'5ms'},"
+ "{'servers':'20', 'busyTimeout':'33ms'},{'servers':'50%', 'busyTimeout':'100ms'},"
+ "{'servers':'100%', 'busyTimeout':'200ms'}]}";
String profile1 = "{'scanTypeActivations':['long','mega'],'maxBusyTimeout':'30m',"
+ "'busyTimeoutMultiplier':4, 'attemptPlans':[{'servers':'2', 'busyTimeout':'10s'},"
+ "{'servers':'4', 'busyTimeout':'2m'},{'servers':'10%', 'busyTimeout':'5m'}]}";
String profile2 =
"{'scanTypeActivations':['mega'],'maxBusyTimeout':'60m','busyTimeoutMultiplier':2, "
+ "'attemptPlans':[{'servers':'100%', 'busyTimeout':'10m'}]}";
var opts1 = Map.of("profiles",
"[" + defaultProfile + ", " + profile1 + "," + profile2 + "]".replace('\'', '"'));
// two profiles activate on the scan type "mega", so should fail
var exception =
assertThrows(IllegalArgumentException.class, () -> runBusyTest(1000, 0, 5, 66, opts1));
assertTrue(exception.getMessage().contains("mega"));
var opts2 = Map.of("profiles", "[" + profile1 + "]".replace('\'', '"'));
// missing a default profile, so should fail
exception =
assertThrows(IllegalArgumentException.class, () -> runBusyTest(1000, 0, 5, 66, opts2));
assertTrue(exception.getMessage().contains("default"));
}
@Test
public void testNoScanServers() {
ConfigurableScanServerSelector selector = new ConfigurableScanServerSelector();
selector.init(new InitParams(Set.of()));
var tabletId = nti("1", "m");
ScanServerSelections actions = selector.selectServers(new DaParams(tabletId));
assertNull(actions.getScanServer(tabletId));
assertEquals(Duration.ZERO, actions.getDelay());
assertEquals(Duration.ZERO, actions.getBusyTimeout());
}
@Test
public void testGroups() {
String defaultProfile =
"{'isDefault':true,'maxBusyTimeout':'5m','busyTimeoutMultiplier':4, 'attemptPlans':"
+ "[{'servers':'100%', 'busyTimeout':'60s'}]}";
String profile1 = "{'scanTypeActivations':['long','st9'],'maxBusyTimeout':'30m','group':'g1',"
+ "'busyTimeoutMultiplier':4, 'attemptPlans':[{'servers':'100%', 'busyTimeout':'60s'}]}";
String profile2 =
"{'scanTypeActivations':['mega'],'maxBusyTimeout':'60m','busyTimeoutMultiplier':2, 'group':'g2',"
+ "'attemptPlans':[{'servers':'100%', 'busyTimeout':'10m'}]}";
var opts = Map.of("profiles",
"[" + defaultProfile + ", " + profile1 + "," + profile2 + "]".replace('\'', '"'));
ConfigurableScanServerSelector selector = new ConfigurableScanServerSelector();
var dg = ScanServerSelector.DEFAULT_SCAN_SERVER_GROUP_NAME;
selector.init(new InitParams(Map.of("ss1:1", dg, "ss2:2", dg, "ss3:3", dg, "ss4:4", "g1",
"ss5:5", "g1", "ss6:6", "g2", "ss7:7", "g2", "ss8:8", "g2"), opts));
Set<String> servers = new HashSet<>();
for (int i = 0; i < 1000; i++) {
var tabletId = nti("1", "m" + i);
ScanServerSelections actions = selector.selectServers(new DaParams(tabletId));
servers.add(actions.getScanServer(tabletId));
}
assertEquals(Set.of("ss1:1", "ss2:2", "ss3:3"), servers);
// config should map this scan type to the group of scan servers g1
var hints = Map.of("scan_type", "long");
servers.clear();
for (int i = 0; i < 1000; i++) {
var tabletId = nti("1", "m" + i);
ScanServerSelections actions =
selector.selectServers(new DaParams(tabletId, Map.of(), hints));
servers.add(actions.getScanServer(tabletId));
}
assertEquals(Set.of("ss4:4", "ss5:5"), servers);
// config should map this scan type to the group of scan servers g2
hints = Map.of("scan_type", "mega");
servers.clear();
for (int i = 0; i < 1000; i++) {
var tabletId = nti("1", "m" + i);
ScanServerSelections actions =
selector.selectServers(new DaParams(tabletId, Map.of(), hints));
servers.add(actions.getScanServer(tabletId));
}
assertEquals(Set.of("ss6:6", "ss7:7", "ss8:8"), servers);
// config does map this scan type to anything, so should use the default group of scan servers
hints = Map.of("scan_type", "rust");
servers.clear();
for (int i = 0; i < 1000; i++) {
var tabletId = nti("1", "m" + i);
ScanServerSelections actions =
selector.selectServers(new DaParams(tabletId, Map.of(), hints));
servers.add(actions.getScanServer(tabletId));
}
assertEquals(Set.of("ss1:1", "ss2:2", "ss3:3"), servers);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import static org.apache.accumulo.core.spi.scan.ScanDispatch.CacheUsage.DISABLED;
import static org.apache.accumulo.core.spi.scan.ScanDispatch.CacheUsage.ENABLED;
import static org.apache.accumulo.core.spi.scan.ScanDispatch.CacheUsage.OPPORTUNISTIC;
import static org.apache.accumulo.core.spi.scan.ScanDispatch.CacheUsage.TABLE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.scan.ScanDispatch.CacheUsage;
import org.apache.accumulo.core.spi.scan.ScanDispatcher.DispatchParameters;
import org.apache.accumulo.core.spi.scan.ScanInfo.Type;
import org.junit.jupiter.api.Test;
/**
 * Verifies that {@code SimpleScanDispatcher} routes single- and multi-range scans to the
 * configured executors and resolves index/data cache usage from options and scan-type hints.
 */
public class SimpleScanDispatcherTest {
  @Test
  public void testProps() {
    // The default executor's thread-count and prioritizer property keys must be derived from
    // SimpleScanDispatcher's default executor name, keeping the two definitions in sync.
    assertTrue(Property.TSERV_SCAN_EXECUTORS_DEFAULT_THREADS.getKey()
        .endsWith(SimpleScanDispatcher.DEFAULT_SCAN_EXECUTOR_NAME + ".threads"));
    assertTrue(Property.TSERV_SCAN_EXECUTORS_DEFAULT_PRIORITIZER.getKey()
        .endsWith(SimpleScanDispatcher.DEFAULT_SCAN_EXECUTOR_NAME + ".prioritizer"));
  }

  // Minimal DispatchParameters backed by a fixed ScanInfo and executor map.
  private static class DispatchParametersImps implements DispatchParameters {

    private ScanInfo si;
    private Map<String,ScanExecutor> se;

    DispatchParametersImps(ScanInfo si, Map<String,ScanExecutor> se) {
      this.si = si;
      this.se = se;
    }

    @Override
    public ScanInfo getScanInfo() {
      return si;
    }

    @Override
    public Map<String,ScanExecutor> getScanExecutors() {
      return se;
    }

    @Override
    public ServiceEnvironment getServiceEnv() {
      // Not exercised by SimpleScanDispatcher in these tests.
      throw new UnsupportedOperationException();
    }
  }

  /**
   * Configures a SimpleScanDispatcher with {@code opts}, dispatches one multi-range and one
   * single-range scan carrying {@code hints}, and asserts the chosen executor name and both
   * cache usages for each scan type.
   */
  private void runTest(Map<String,String> opts, Map<String,String> hints, String expectedSingle,
      String expectedMulti, CacheUsage expectedIndexCU, CacheUsage expectedDataCU) {
    TestScanInfo msi = new TestScanInfo("a", Type.MULTI, 4);
    msi.executionHints = hints;
    TestScanInfo ssi = new TestScanInfo("a", Type.SINGLE, 4);
    ssi.executionHints = hints;

    SimpleScanDispatcher ssd1 = new SimpleScanDispatcher();
    ssd1.init(new ScanDispatcher.InitParameters() {

      @Override
      public TableId getTableId() {
        throw new UnsupportedOperationException();
      }

      @Override
      public Map<String,String> getOptions() {
        return opts;
      }

      @Override
      public ServiceEnvironment getServiceEnv() {
        throw new UnsupportedOperationException();
      }
    });

    // Only the executor names matter for dispatch; the values are never dereferenced.
    Map<String,ScanExecutor> executors = new HashMap<>();
    executors.put("E1", null);
    executors.put("E2", null);
    executors.put("E3", null);

    ScanDispatch multiPrefs = ssd1.dispatch(new DispatchParametersImps(msi, executors));
    assertEquals(expectedMulti, multiPrefs.getExecutorName());
    assertEquals(expectedIndexCU, multiPrefs.getIndexCacheUsage());
    assertEquals(expectedDataCU, multiPrefs.getDataCacheUsage());

    ScanDispatch singlePrefs = ssd1.dispatch(new DispatchParametersImps(ssi, executors));
    assertEquals(expectedSingle, singlePrefs.getExecutorName());
    assertEquals(expectedIndexCU, singlePrefs.getIndexCacheUsage());
    assertEquals(expectedDataCU, singlePrefs.getDataCacheUsage());
  }

  // Overload for tests without hints; cache usage is expected to fall back to TABLE.
  private void runTest(Map<String,String> opts, String expectedSingle, String expectedMulti) {
    runTest(opts, Collections.emptyMap(), expectedSingle, expectedMulti, TABLE, TABLE);
  }

  @Test
  public void testBasic() {
    // "executor" sets both scan types; "single_executor"/"multi_executor" override it.
    String dname = SimpleScanDispatcher.DEFAULT_SCAN_EXECUTOR_NAME;

    runTest(Collections.emptyMap(), dname, dname);
    runTest(Map.of("executor", "E1"), "E1", "E1");
    runTest(Map.of("single_executor", "E2"), "E2", dname);
    runTest(Map.of("multi_executor", "E3"), dname, "E3");
    runTest(Map.of("executor", "E1", "single_executor", "E2"), "E2", "E1");
    runTest(Map.of("executor", "E1", "multi_executor", "E3"), "E1", "E3");
    runTest(Map.of("single_executor", "E2", "multi_executor", "E3"), "E2", "E3");
    runTest(Map.of("executor", "E1", "single_executor", "E2", "multi_executor", "E3"), "E2", "E3");
  }

  @Test
  public void testHints() {
    // "executor.<scan_type>" options override "executor" when the hint matches.
    runTest(Map.of("executor", "E1"), Map.of("scan_type", "quick"), "E1", "E1", TABLE, TABLE);
    runTest(Map.of("executor", "E1", "executor.quick", "E2"), Map.of("scan_type", "quick"), "E2",
        "E2", TABLE, TABLE);
    runTest(Map.of("executor", "E1", "executor.quick", "E2", "executor.slow", "E3"),
        Map.of("scan_type", "slow"), "E3", "E3", TABLE, TABLE);
  }

  @Test
  public void testCache() {
    String dname = SimpleScanDispatcher.DEFAULT_SCAN_EXECUTOR_NAME;

    // "cacheUsage.<scan_type>.index"/".data" set the caches individually;
    // "cacheUsage.<scan_type>" sets both at once.
    runTest(
        Map.of("executor", "E1", "cacheUsage.slow.index", "opportunistic", "cacheUsage.slow.data",
            "disabled", "cacheUsage.fast", "enabled", "executor.slow", "E2"),
        Map.of("scan_type", "slow"), "E2", "E2", OPPORTUNISTIC, DISABLED);
    runTest(
        Map.of("single_executor", "E1", "cacheUsage.slow.index", "opportunistic",
            "cacheUsage.slow.data", "disabled", "cacheUsage.fast", "enabled"),
        Map.of("scan_type", "fast"), "E1", dname, ENABLED, ENABLED);
    // Hints that match no cacheUsage option fall back to the table's cache settings.
    runTest(
        Map.of("executor", "E1", "cacheUsage.slow.index", "opportunistic", "cacheUsage.slow.data",
            "disabled", "cacheUsage.fast", "enabled"),
        Map.of("scan_type", "notconfigured"), "E1", "E1", TABLE, TABLE);
    runTest(Map.of("executor", "E1", "cacheUsage.slow.index", "opportunistic",
        "cacheUsage.slow.data", "disabled", "cacheUsage.fast", "enabled"), Map.of(), "E1", "E1",
        TABLE, TABLE);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.scan.ScanInfo.Type;
import org.junit.jupiter.api.Test;
/**
 * Verifies the ordering produced by {@code IdleRatioScanPrioritizer}: scans that have never run
 * come first (oldest creation first), then scans ordered by idle ratio and last run time.
 */
public class IdleRatioScanPrioritizerTest {
  @Test
  public void testSort() {
    long now = System.currentTimeMillis();

    List<TestScanInfo> scans = new ArrayList<>();
    // Never-run scans: the older creation time should sort first.
    scans.add(new TestScanInfo("a", Type.SINGLE, now - 3));
    scans.add(new TestScanInfo("b", Type.SINGLE, now - 8));
    // Same last-run time, different idle ratios.
    scans.add(new TestScanInfo("c", Type.SINGLE, now - 16, 2, 10));
    scans.add(new TestScanInfo("d", Type.SINGLE, now - 16, 5, 10));
    // Same idle ratio, different last-run times.
    scans.add(new TestScanInfo("e", Type.SINGLE, now - 12, 5, 9));
    scans.add(new TestScanInfo("f", Type.SINGLE, now - 12, 3, 7));

    // Randomize input order so the result depends only on the comparator.
    Collections.shuffle(scans);

    var createParams = new ScanPrioritizer.CreateParameters() {
      @Override
      public Map<String,String> getOptions() {
        return Collections.emptyMap();
      }

      @Override
      public ServiceEnvironment getServiceEnv() {
        throw new UnsupportedOperationException();
      }
    };
    Comparator<ScanInfo> comparator =
        new IdleRatioScanPrioritizer().createComparator(createParams);

    scans.sort(comparator);

    String[] expectedOrder = {"b", "a", "f", "e", "d", "c"};
    for (int i = 0; i < expectedOrder.length; i++) {
      assertEquals(expectedOrder[i], scans.get(i).testId);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.scan;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.OptionalLong;
import java.util.Set;
import org.apache.accumulo.core.data.Column;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.IteratorConfiguration;
import org.apache.accumulo.core.spi.common.Stats;
import org.apache.accumulo.core.util.Stat;
/**
 * Mutable {@link ScanInfo} stub for prioritizer and dispatcher tests. The varargs {@code times}
 * passed to the constructor are alternating run start/stop offsets relative to
 * {@code creationTime}; the gaps between runs become idle-time statistics.
 */
public class TestScanInfo implements ScanInfo {

  // Package-private on purpose: sibling tests poke these fields directly.
  String testId;
  Type scanType;
  long creationTime;
  OptionalLong lastRunTime = OptionalLong.empty();
  Stat runTimeStats = new Stat();
  Stat idleTimeStats = new Stat();
  Map<String,String> executionHints = Collections.emptyMap();

  TestScanInfo(String testId, Type scanType, long creationTime, int... times) {
    this.testId = testId;
    this.scanType = scanType;
    this.creationTime = creationTime;

    // times holds pairs of (run start, run stop); the gap before each start is idle time.
    for (int pair = 0; pair < times.length; pair += 2) {
      int runStart = times[pair];
      int runStop = times[pair + 1];
      int previousStop = pair == 0 ? 0 : times[pair - 1];
      idleTimeStats.addStat(runStart - previousStop);
      runTimeStats.addStat(runStop - runStart);
    }

    if (times.length > 0) {
      // The final entry is the stop time of the last run, relative to creation.
      this.lastRunTime = OptionalLong.of(creationTime + times[times.length - 1]);
    }
  }

  TestScanInfo setExecutionHints(String k, String v) {
    this.executionHints = Map.of(k, v);
    return this;
  }

  @Override
  public Type getScanType() {
    return this.scanType;
  }

  @Override
  public TableId getTableId() {
    throw new UnsupportedOperationException();
  }

  @Override
  public long getCreationTime() {
    return this.creationTime;
  }

  @Override
  public OptionalLong getLastRunTime() {
    return this.lastRunTime;
  }

  @Override
  public Stats getRunTimeStats() {
    return this.runTimeStats;
  }

  @Override
  public Stats getIdleTimeStats() {
    return this.idleTimeStats;
  }

  @Override
  public Stats getIdleTimeStats(long currentTime) {
    // Fold in the currently accumulating idle period since the last run (or creation).
    Stat withCurrent = idleTimeStats.copy();
    withCurrent.addStat(currentTime - lastRunTime.orElse(creationTime));
    return withCurrent;
  }

  @Override
  public Set<Column> getFetchedColumns() {
    throw new UnsupportedOperationException();
  }

  @Override
  public Collection<IteratorConfiguration> getClientScanIterators() {
    throw new UnsupportedOperationException();
  }

  @Override
  public Map<String,String> getExecutionHints() {
    return this.executionHints;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.manager.balancer.AssignmentParamsImpl;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.manager.balancer.TabletStatisticsImpl;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
import org.apache.accumulo.core.util.ConfigurationImpl;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.junit.jupiter.api.Test;
/**
 * Verifies that HostRegexTableLoadBalancer notices a live change to a per-table host regex
 * property and migrates the affected table's tablets to the newly matching rack.
 */
public class HostRegexTableLoadBalancerReconfigurationTest
    extends BaseHostRegexTableLoadBalancerTest {

  // Placements computed by getAssignments(); read back by getOnlineTabletsForTable()
  // below so the balancer observes the same state it produced.
  private final Map<TabletId,TabletServerId> assignments = new HashMap<>();

  @Test
  public void testConfigurationChanges() {
    HashMap<String,TableId> tables = new HashMap<>();
    tables.put(FOO.getTableName(), FOO.getId());
    tables.put(BAR.getTableName(), BAR.getId());
    tables.put(BAZ.getTableName(), BAZ.getId());

    ConfigurationCopy config = new ConfigurationCopy(SiteConfiguration.empty().build());
    DEFAULT_TABLE_PROPERTIES.forEach(config::set);
    ConfigurationImpl configImpl = new ConfigurationImpl(config);
    // Mocked environment serving the mutable config so property changes are visible.
    BalancerEnvironment environment = createMock(BalancerEnvironment.class);
    expect(environment.getConfiguration()).andReturn(configImpl).anyTimes();
    expect(environment.getTableIdMap()).andReturn(tables).anyTimes();
    expect(environment.getConfiguration(anyObject(TableId.class))).andReturn(configImpl).anyTimes();
    replay(environment);
    init(environment);

    // Assign every tablet of every test table (15 total, 5 per table).
    Map<TabletId,TabletServerId> unassigned = new HashMap<>();
    for (List<TabletId> tablets : tableTablets.values()) {
      for (TabletId tablet : tablets) {
        unassigned.put(tablet, null);
      }
    }
    this.getAssignments(
        new AssignmentParamsImpl(Collections.unmodifiableSortedMap(allTabletServers),
            Collections.unmodifiableMap(unassigned), assignments));
    assertEquals(15, assignments.size());
    // Ensure unique tservers
    for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
      for (Entry<TabletId,TabletServerId> e2 : assignments.entrySet()) {
        if (e.getKey().equals(e2.getKey())) {
          continue;
        }
        if (e.getValue().equals(e2.getValue())) {
          fail("Assignment failure. " + e.getKey() + " and " + e2.getKey()
              + " are assigned to the same host: " + e.getValue());
        }
      }
    }
    // Ensure assignments are correct
    for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
      if (!tabletInBounds(e.getKey(), e.getValue())) {
        fail("tablet not in bounds: " + e.getKey() + " -> " + e.getValue().getHost());
      }
    }
    Set<TabletId> migrations = new HashSet<>();
    List<TabletMigration> migrationsOut = new ArrayList<>();
    // Wait to trigger the out of bounds check which will call our version of
    // getOnlineTabletsForTable
    UtilWaitThread.sleep(3000);
    this.balance(new BalanceParamsImpl(Collections.unmodifiableSortedMap(allTabletServers),
        migrations, migrationsOut));
    // Everything is still in bounds, so nothing should move.
    assertEquals(0, migrationsOut.size());
    // Change property, simulate call by TableConfWatcher
    config.set(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + BAR.getTableName(), "r01.*");
    // Wait to trigger the out of bounds check and the repool check
    UtilWaitThread.sleep(10000);
    this.balance(new BalanceParamsImpl(Collections.unmodifiableSortedMap(allTabletServers),
        migrations, migrationsOut));
    // BAR's five tablets now violate the new regex and must migrate to rack r01
    // (hosts 192.168.0.1 - 192.168.0.5).
    assertEquals(5, migrationsOut.size());
    for (TabletMigration migration : migrationsOut) {
      assertTrue(migration.getNewTabletServer().getHost().startsWith("192.168.0.1")
          || migration.getNewTabletServer().getHost().startsWith("192.168.0.2")
          || migration.getNewTabletServer().getHost().startsWith("192.168.0.3")
          || migration.getNewTabletServer().getHost().startsWith("192.168.0.4")
          || migration.getNewTabletServer().getHost().startsWith("192.168.0.5"));
    }
  }

  /**
   * Reports the tablets this test assigned to {@code tserver} for {@code tableId}, letting the
   * balancer's out-of-bounds check see the placements produced by getAssignments() above.
   */
  @Override
  public List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tserver, TableId tableId) {
    List<TabletStatistics> tablets = new ArrayList<>();
    // Report assignment information
    for (Entry<TabletId,TabletServerId> e : this.assignments.entrySet()) {
      if (e.getValue().equals(tserver) && e.getKey().getTable().equals(tableId)) {
        TabletStats ts = new TabletStats();
        TabletId tid = e.getKey();
        ts.setExtent(
            new KeyExtent(tid.getTable(), tid.getEndRow(), tid.getPrevEndRow()).toThrift());
        tablets.add(new TabletStatisticsImpl(ts));
      }
    }
    return tablets;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.manager.balancer.TServerStatusImpl;
import org.apache.accumulo.core.manager.balancer.TableStatisticsImpl;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.manager.balancer.TabletStatisticsImpl;
import org.apache.accumulo.core.manager.thrift.TableInfo;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TableStatistics;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
import org.apache.hadoop.io.Text;
/**
 * Common fixture for {@link HostRegexTableLoadBalancer} tests. It models three tables (foo, bar,
 * baz) of five tablets each and fifteen tablet servers spread over three racks (r01..r03, five
 * hosts per rack). The default properties pin table foo to rack r01 and table bar to rack r02;
 * baz falls through to the default pool.
 */
public abstract class BaseHostRegexTableLoadBalancerTest extends HostRegexTableLoadBalancer {

  /** Simple immutable name/id pair describing one test table. */
  protected static class TestTable {
    private final String tableName;
    private final TableId id;

    TestTable(String tableName, TableId id) {
      this.tableName = tableName;
      this.id = id;
    }

    public String getTableName() {
      return tableName;
    }

    public TableId getId() {
      return id;
    }
  }

  protected static final TestTable FOO = new TestTable("foo", TableId.of("1"));
  protected static final TestTable BAR = new TestTable("bar", TableId.of("2"));
  protected static final TestTable BAZ = new TestTable("baz", TableId.of("3"));

  /** Balancer properties shared by the subclasses: foo -> rack r01, bar -> rack r02. */
  protected static final HashMap<String,String> DEFAULT_TABLE_PROPERTIES = new HashMap<>();

  // Populate the static map once at class-load time. (Previously this was an instance
  // initializer, which re-put the same entries every time a test instance was constructed and
  // relied on the static FOO/BAR fields being declared after the block.)
  static {
    DEFAULT_TABLE_PROPERTIES.put(HostRegexTableLoadBalancer.HOST_BALANCER_OOB_CHECK_KEY, "7s");
    DEFAULT_TABLE_PROPERTIES.put(HostRegexTableLoadBalancer.HOST_BALANCER_REGEX_MAX_MIGRATIONS_KEY,
        "4");
    DEFAULT_TABLE_PROPERTIES
        .put(HostRegexTableLoadBalancer.HOST_BALANCER_OUTSTANDING_MIGRATIONS_KEY, "10");
    DEFAULT_TABLE_PROPERTIES
        .put(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + FOO.getTableName(), "r01.*");
    DEFAULT_TABLE_PROPERTIES
        .put(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + BAR.getTableName(), "r02.*");
    DEFAULT_TABLE_PROPERTIES.put(Property.TABLE_LOAD_BALANCER.getKey(),
        TestSimpleBalancer.class.getName());
  }

  /**
   * Per-table balancer used by the tests: reports the table's five fake tablets when asked about
   * the server the table was initially placed on, and {@code null} for any other server.
   */
  protected class TestSimpleBalancer extends SimpleLoadBalancer {

    @Override
    public List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tserver,
        TableId tableId) {
      String tableName = idToTableName(tableId);
      TabletServerId initialLocation = initialTableLocation.get(tableName);
      if (tserver.equals(initialLocation)) {
        List<TabletStatistics> list = new ArrayList<>(5);
        for (TabletId tabletId : tableTablets.get(tableName)) {
          TabletStats thriftStats = new TabletStats();
          thriftStats.setExtent(
              new KeyExtent(tabletId.getTable(), tabletId.getEndRow(), tabletId.getPrevEndRow())
                  .toThrift());
          list.add(new TabletStatisticsImpl(thriftStats));
        }
        return list;
      }
      return null;
    }
  }

  /** host ip -> rack/slot name, e.g. 192.168.0.6 -> r02s01 */
  protected final Map<String,String> servers = new HashMap<>(15);
  /** all fifteen tablet servers, keyed by server id */
  protected final SortedMap<TabletServerId,TServerStatus> allTabletServers = new TreeMap<>();
  /** table name -> that table's five tablets */
  protected final Map<String,List<TabletId>> tableTablets = new HashMap<>(3);
  /** table name -> the server all of its tablets start out on */
  protected final Map<String,TabletServerId> initialTableLocation = new HashMap<>(3);

  {
    // Five hosts per rack: .1-.5 -> r01, .6-.10 -> r02, .11-.15 -> r03.
    for (int i = 1; i <= 15; i++) {
      int rack = ((i - 1) / 5) + 1;
      int slot = ((i - 1) % 5) + 1;
      servers.put("192.168.0." + i, "r0" + rack + "s0" + slot);
      allTabletServers.put(new TabletServerIdImpl("192.168.0." + i, 9997, Integer.toHexString(1)),
          new TServerStatusImpl(new org.apache.accumulo.core.manager.thrift.TabletServerStatus()));
    }
    // Each table starts with all of its tablets on the first host of "its" rack.
    initialTableLocation.put(FOO.getTableName(),
        new TabletServerIdImpl("192.168.0.1", 9997, Integer.toHexString(1)));
    initialTableLocation.put(BAR.getTableName(),
        new TabletServerIdImpl("192.168.0.6", 9997, Integer.toHexString(1)));
    initialTableLocation.put(BAZ.getTableName(),
        new TabletServerIdImpl("192.168.0.11", 9997, Integer.toHexString(1)));
    // Five consecutive single-row tablets per table; end rows start at 1/11/21 respectively,
    // each tablet spanning (end-1, end].
    TestTable[] tables = {FOO, BAR, BAZ};
    int[] firstEndRow = {1, 11, 21};
    for (int t = 0; t < tables.length; t++) {
      List<TabletId> tablets = new ArrayList<>(5);
      for (int i = 0; i < 5; i++) {
        int end = firstEndRow[t] + i;
        tablets.add(new TabletIdImpl(new KeyExtent(tables[t].getId(),
            new Text(Integer.toString(end)), new Text(Integer.toString(end - 1)))));
      }
      tableTablets.put(tables[t].getTableName(), tablets);
    }
  }

  /**
   * Returns true if the given tablet is hosted on a server its table is allowed on: table id "1"
   * on hosts .1-.5, "2" on hosts .6-.10, "3" on hosts .11-.15.
   */
  protected boolean tabletInBounds(TabletId tabletId, TabletServerId tsi) {
    String tid = tabletId.getTable().canonical();
    String host = tsi.getHost();
    if (tid.equals("1")
        && (host.equals("192.168.0.1") || host.equals("192.168.0.2") || host.equals("192.168.0.3")
            || host.equals("192.168.0.4") || host.equals("192.168.0.5"))) {
      return true;
    } else if (tid.equals("2")
        && (host.equals("192.168.0.6") || host.equals("192.168.0.7") || host.equals("192.168.0.8")
            || host.equals("192.168.0.9") || host.equals("192.168.0.10"))) {
      return true;
    } else {
      return tid.equals("3") && (host.equals("192.168.0.11") || host.equals("192.168.0.12")
          || host.equals("192.168.0.13") || host.equals("192.168.0.14")
          || host.equals("192.168.0.15"));
    }
  }

  /** Reverse lookup from table id to table name; returns null for an unknown id. */
  protected String idToTableName(TableId id) {
    if (id.equals(FOO.getId())) {
      return FOO.getTableName();
    } else if (id.equals(BAR.getId())) {
      return BAR.getTableName();
    } else if (id.equals(BAZ.getId())) {
      return BAZ.getTableName();
    } else {
      return null;
    }
  }

  @Override
  protected TabletBalancer getBalancerForTable(TableId table) {
    return new TestSimpleBalancer();
  }

  @Override
  protected String getNameFromIp(String hostIp) throws UnknownHostException {
    // Single lookup instead of containsKey + get.
    String name = servers.get(hostIp);
    if (name == null) {
      throw new UnknownHostException();
    }
    return name;
  }

  /**
   * Builds the current cluster state for the first {@code numTservers} servers, with each table's
   * five tablets all reported online on that table's initial server.
   */
  protected SortedMap<TabletServerId,TServerStatus> createCurrent(int numTservers) {
    String base = "192.168.0.";
    TreeMap<TabletServerId,TServerStatus> current = new TreeMap<>();
    for (int i = 1; i <= numTservers; i++) {
      TServerStatusImpl status =
          new TServerStatusImpl(new org.apache.accumulo.core.manager.thrift.TabletServerStatus());
      Map<String,TableStatistics> tableMap = new HashMap<>();
      tableMap.put(FOO.getId().canonical(), new TableStatisticsImpl(new TableInfo()));
      tableMap.put(BAR.getId().canonical(), new TableStatisticsImpl(new TableInfo()));
      tableMap.put(BAZ.getId().canonical(), new TableStatisticsImpl(new TableInfo()));
      status.setTableMap(tableMap);
      current.put(new TabletServerIdImpl(base + i, 9997, Integer.toHexString(1)), status);
    }
    // now put all of the tablets on one server
    for (Map.Entry<String,TabletServerId> entry : initialTableLocation.entrySet()) {
      TServerStatus status = current.get(entry.getValue());
      if (status != null) {
        TableId tableId = environment.getTableIdMap().get(entry.getKey());
        ((TableStatisticsImpl) status.getTableMap().get(tableId.canonical()))
            .setOnlineTabletCount(5);
      }
    }
    return current;
  }
}
| 9,378 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/balancer/TableLoadBalancerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.manager.balancer.TServerStatusImpl;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.manager.balancer.TabletStatisticsImpl;
import org.apache.accumulo.core.manager.thrift.TableInfo;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
import org.apache.accumulo.core.util.ConfigurationImpl;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests {@link TableLoadBalancer}: balancing is delegated to a per-table balancer, and when a
 * second (empty) tablet server appears, half of each table's tablets should migrate to it.
 */
public class TableLoadBalancerTest {
// table name -> canonical table id used by the mocked environment
private static final Map<String,String> TABLE_ID_MAP =
Map.of("t1", "a1", "t2", "b12", "t3", "c4");
// shorthand factory for a tablet server id
private static TabletServerId mkts(String host, int port, String session) {
return new TabletServerIdImpl(host, port, session);
}
// Builds a TServerStatus from alternating (table name, tablet count) varargs: each String sets
// the current table name, each Integer records that many online tablets for it.
private static TServerStatus status(Object... config) {
org.apache.accumulo.core.manager.thrift.TabletServerStatus thriftStatus =
new org.apache.accumulo.core.manager.thrift.TabletServerStatus();
thriftStatus.tableMap = new HashMap<>();
String tablename = null;
for (Object c : config) {
if (c instanceof String) {
// remember the table name; the next Integer applies to it
tablename = (String) c;
} else {
TableInfo info = new TableInfo();
int count = (Integer) c;
info.onlineTablets = count;
info.tablets = count;
thriftStatus.tableMap.put(tablename, info);
}
}
return new TServerStatusImpl(thriftStatus);
}
// shared mutable cluster state; populated by test(), read by generateFakeTablets()
private static final SortedMap<TabletServerId,TServerStatus> state = new TreeMap<>();
// Fabricates one TabletStatistics per online tablet that `state` reports for (tserver, tableId),
// with synthetic end rows derived from the host name so extents are distinct.
static List<TabletStatistics> generateFakeTablets(TabletServerId tserver, TableId tableId) {
List<TabletStatistics> result = new ArrayList<>();
TServerStatus tableInfo = state.get(tserver);
// generate some fake tablets
for (int i = 0; i < tableInfo.getTableMap().get(tableId.canonical()).getOnlineTabletCount();
i++) {
TabletStats stats = new TabletStats();
stats.extent =
new KeyExtent(tableId, new Text(tserver.getHost() + String.format("%03d", i + 1)),
new Text(tserver.getHost() + String.format("%03d", i))).toThrift();
result.add(new TabletStatisticsImpl(stats));
}
return result;
}
// Per-table balancer whose tablet listing comes from the fake `state` map instead of a cluster.
public static class TestSimpleLoadBalancer extends SimpleLoadBalancer {
public TestSimpleLoadBalancer(TableId table) {
super(table);
}
// no environment needed; all data comes from the static state map
@Override
public void init(BalancerEnvironment balancerEnvironment) {}
@Override
public List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tserver,
TableId tableId) {
return generateFakeTablets(tserver, tableId);
}
}
@Test
public void test() {
// mock environment: three online tables, all configured to use TestSimpleLoadBalancer
BalancerEnvironment environment = createMock(BalancerEnvironment.class);
ConfigurationCopy cc = new ConfigurationCopy(
Map.of(Property.TABLE_LOAD_BALANCER.getKey(), TestSimpleLoadBalancer.class.getName()));
ConfigurationImpl tableConfig = new ConfigurationImpl(cc);
Map<String,TableId> tableIdMap = TABLE_ID_MAP.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> TableId.of(e.getValue())));
expect(environment.getTableIdMap()).andReturn(tableIdMap).anyTimes();
expect(environment.isTableOnline(anyObject(TableId.class))).andReturn(true).anyTimes();
expect(environment.getConfiguration(anyObject(TableId.class))).andReturn(tableConfig)
.anyTimes();
expect(environment.tableContext(anyObject(TableId.class))).andReturn(null).anyTimes();
replay(environment);
String t1Id = TABLE_ID_MAP.get("t1"), t2Id = TABLE_ID_MAP.get("t2"),
t3Id = TABLE_ID_MAP.get("t3");
state.clear();
// single server holding 10 tablets of each table: nothing to balance yet
TabletServerId svr = mkts("10.0.0.1", 1234, "0x01020304");
state.put(svr, status(t1Id, 10, t2Id, 10, t3Id, 10));
Set<TabletId> migrations = Collections.emptySet();
List<TabletMigration> migrationsOut = new ArrayList<>();
TableLoadBalancer tls = new TableLoadBalancer();
tls.init(environment);
tls.balance(new BalanceParamsImpl(state, migrations, migrationsOut));
assertEquals(0, migrationsOut.size());
// add an empty second server: half of each table's tablets should move to it
state.put(mkts("10.0.0.2", 2345, "0x02030405"), status());
tls = new TableLoadBalancer();
tls.init(environment);
tls.balance(new BalanceParamsImpl(state, migrations, migrationsOut));
int count = 0;
Map<TableId,Integer> movedByTable = new HashMap<>();
movedByTable.put(TableId.of(t1Id), 0);
movedByTable.put(TableId.of(t2Id), 0);
movedByTable.put(TableId.of(t3Id), 0);
// tally migrations leaving the original server, per table
for (TabletMigration migration : migrationsOut) {
if (migration.getOldTabletServer().equals(svr)) {
count++;
}
TableId key = migration.getTablet().getTable();
movedByTable.put(key, movedByTable.get(key) + 1);
}
// 15 total moves: 5 from each of the 3 tables (half of 10, rounded up to even split)
assertEquals(15, count);
for (Integer moved : movedByTable.values()) {
assertEquals(5, moved.intValue());
}
}
}
| 9,379 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/balancer/HostRegexTableLoadBalancerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.regex.Pattern;
import org.apache.accumulo.core.conf.ConfigurationCopy;
import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.accumulo.core.manager.balancer.AssignmentParamsImpl;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.manager.balancer.TabletStatisticsImpl;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
import org.apache.accumulo.core.util.ConfigurationImpl;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.junit.jupiter.api.Test;
public class HostRegexTableLoadBalancerTest extends BaseHostRegexTableLoadBalancerTest {
/**
 * Wires up a mocked {@link BalancerEnvironment} that exposes the three test tables and the
 * supplied balancer properties, then initializes the balancer under test with it.
 */
public void init(Map<String,String> tableProperties) {
  // Wrap the supplied properties in an Accumulo configuration.
  ConfigurationCopy siteConfig = new ConfigurationCopy(SiteConfiguration.empty().build());
  tableProperties.forEach(siteConfig::set);
  ConfigurationImpl wrappedConfig = new ConfigurationImpl(siteConfig);
  // Expose the three fixture tables by name.
  HashMap<String,TableId> tableIds = new HashMap<>();
  for (TestTable table : new TestTable[] {FOO, BAR, BAZ}) {
    tableIds.put(table.getTableName(), table.getId());
  }
  BalancerEnvironment env = createMock(BalancerEnvironment.class);
  expect(env.getConfiguration()).andReturn(wrappedConfig).anyTimes();
  expect(env.getTableIdMap()).andReturn(tableIds).anyTimes();
  expect(env.getConfiguration(anyObject(TableId.class))).andReturn(wrappedConfig).anyTimes();
  replay(env);
  init(env);
}
/** Verifies that init() parses the default properties into the expected balancer settings. */
@Test
public void testInit() {
  init(DEFAULT_TABLE_PROPERTIES);
  assertEquals(7000, this.getOobCheckMillis(), "OOB check interval value is incorrect");
  assertEquals(4, this.getMaxMigrations(), "Max migrations is incorrect");
  assertEquals(10, this.getMaxOutstandingMigrations(), "Max outstanding migrations is incorrect");
  assertFalse(isIpBasedRegex());
  // Only foo and bar carry a host regex; verify each compiled pattern round-trips.
  Map<String,Pattern> poolPatterns = this.getPoolNameToRegexPattern();
  assertEquals(2, poolPatterns.size());
  for (Map.Entry<String,String> expected : Map
      .of(FOO.getTableName(), "r01.*", BAR.getTableName(), "r02.*").entrySet()) {
    assertTrue(poolPatterns.containsKey(expected.getKey()));
    assertEquals(Pattern.compile(expected.getValue()).pattern(),
        poolPatterns.get(expected.getKey()).pattern());
  }
}
/**
 * Runs repeated balance passes over the 15-server cluster. Each of the three pools needs to move
 * tablets, but the balancer is capped at 4 migrations per pass (see DEFAULT_TABLE_PROPERTIES),
 * so three passes each produce 4 migrations and a final pass produces none.
 */
@Test
public void testBalance() {
  init(DEFAULT_TABLE_PROPERTIES);
  Set<TabletId> migrations = new HashSet<>();
  List<TabletMigration> migrationsOut = new ArrayList<>();
  // Expected migrations per pass: 4 (max) for each of three passes, then nothing left to do.
  for (int expected : new int[] {4, 4, 4, 0}) {
    migrationsOut.clear();
    long wait =
        this.balance(new BalanceParamsImpl(Collections.unmodifiableSortedMap(createCurrent(15)),
            migrations, migrationsOut));
    assertEquals(20000, wait);
    assertEquals(expected, migrationsOut.size());
    // Feed this pass's migrations back in as "in progress" for the next pass.
    for (TabletMigration m : migrationsOut) {
      migrations.add(m.getTablet());
    }
  }
}
/**
 * With 10 tablets already migrating (the configured maxOutstandingMigrations), the balancer must
 * not propose any additional migrations.
 */
@Test
public void testBalanceWithTooManyOutstandingMigrations() {
  init(DEFAULT_TABLE_PROPERTIES);
  // lets say we already have migrations ongoing for the FOO and BAR table extends (should be 5 of
  // each of them) for a total of 10
  Set<TabletId> inProgress = new HashSet<>();
  inProgress.addAll(tableTablets.get(FOO.getTableName()));
  inProgress.addAll(tableTablets.get(BAR.getTableName()));
  List<TabletMigration> proposed = new ArrayList<>();
  long wait =
      this.balance(new BalanceParamsImpl(Collections.unmodifiableSortedMap(createCurrent(15)),
          inProgress, proposed));
  assertEquals(20000, wait);
  // no migrations should have occurred as 10 is the maxOutstandingMigrations
  assertEquals(0, proposed.size());
}
/**
 * Splitting by hostname regex must yield three pools: FOO gets rack r01 (hosts .1-.5), BAR gets
 * rack r02 (hosts .6-.10), and the unmatched rack r03 hosts (.11-.15) land in the default pool.
 */
@Test
public void testSplitCurrentByRegexUsingHostname() {
  init(DEFAULT_TABLE_PROPERTIES);
  Map<String,SortedMap<TabletServerId,TServerStatus>> groups =
      this.splitCurrentByRegex(createCurrent(15));
  assertEquals(3, groups.size());
  // Each pool holds five consecutive hosts; iterate instead of repeating 15 asserts.
  String[] pools = {FOO.getTableName(), BAR.getTableName(), DEFAULT_POOL};
  int first = 1;
  for (String pool : pools) {
    assertTrue(groups.containsKey(pool));
    SortedMap<TabletServerId,TServerStatus> hosts = groups.get(pool);
    assertEquals(5, hosts.size());
    for (int i = first; i < first + 5; i++) {
      assertTrue(hosts
          .containsKey(new TabletServerIdImpl("192.168.0." + i, 9997, Integer.toHexString(1))));
    }
    first += 5;
  }
}
/**
 * When pool regexes overlap, hosts may belong to several pools: FOO's "r.*" matches every rack
 * (all 15 hosts, same as the default pool), while BAR's "r01.*|r02.*" matches hosts .1-.10.
 */
@Test
public void testSplitCurrentByRegexUsingOverlappingPools() {
  HashMap<String,String> props = new HashMap<>(DEFAULT_TABLE_PROPERTIES);
  props.put(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + FOO.getTableName(), "r.*");
  props.put(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + BAR.getTableName(), "r01.*|r02.*");
  init(props);
  Map<String,SortedMap<TabletServerId,TServerStatus>> groups =
      this.splitCurrentByRegex(createCurrent(15));
  // Groups foo, bar, and the default pool which contains all known hosts
  assertEquals(3, groups.size());
  // FOO's regex matches every rack, so both its pool and the default pool see all 15 hosts.
  for (String pool : new String[] {FOO.getTableName(), DEFAULT_POOL}) {
    assertTrue(groups.containsKey(pool));
    SortedMap<TabletServerId,TServerStatus> hosts = groups.get(pool);
    assertEquals(15, hosts.size());
    for (int i = 1; i <= 15; i++) {
      assertTrue(hosts
          .containsKey(new TabletServerIdImpl("192.168.0." + i, 9997, Integer.toHexString(1))));
    }
  }
  // BAR's regex matches racks r01 and r02 only: hosts .1-.10.
  assertTrue(groups.containsKey(BAR.getTableName()));
  SortedMap<TabletServerId,TServerStatus> barHosts = groups.get(BAR.getTableName());
  assertEquals(10, barHosts.size());
  for (int i = 1; i <= 10; i++) {
    assertTrue(barHosts
        .containsKey(new TabletServerIdImpl("192.168.0." + i, 9997, Integer.toHexString(1))));
  }
}
/**
 * Same pool split as the hostname test, but matching on IP addresses instead of host names:
 * FOO gets hosts .1-.5, BAR gets .6-.10, and .11-.15 fall into the default pool.
 */
@Test
public void testSplitCurrentByRegexUsingIP() {
  HashMap<String,String> props = new HashMap<>();
  props.put(HostRegexTableLoadBalancer.HOST_BALANCER_OOB_CHECK_KEY, "30s");
  props.put(HostRegexTableLoadBalancer.HOST_BALANCER_REGEX_USING_IPS_KEY, "true");
  props.put(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + FOO.getTableName(),
      "192\\.168\\.0\\.[1-5]");
  props.put(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + BAR.getTableName(),
      "192\\.168\\.0\\.[6-9]|192\\.168\\.0\\.10");
  init(props);
  assertTrue(isIpBasedRegex());
  Map<String,SortedMap<TabletServerId,TServerStatus>> groups =
      this.splitCurrentByRegex(createCurrent(15));
  assertEquals(3, groups.size());
  // Each pool holds five consecutive hosts; iterate instead of repeating 15 asserts.
  String[] pools = {FOO.getTableName(), BAR.getTableName(), DEFAULT_POOL};
  int first = 1;
  for (String pool : pools) {
    assertTrue(groups.containsKey(pool));
    SortedMap<TabletServerId,TServerStatus> hosts = groups.get(pool);
    assertEquals(5, hosts.size());
    for (int i = first; i < first + 5; i++) {
      assertTrue(hosts
          .containsKey(new TabletServerIdImpl("192.168.0." + i, 9997, Integer.toHexString(1))));
    }
    first += 5;
  }
}
/**
 * With every tablet unassigned, all 15 must be assigned, each to a distinct server that is
 * permitted by its table's regex pool.
 */
@Test
public void testAllUnassigned() {
  init(DEFAULT_TABLE_PROPERTIES);
  Map<TabletId,TabletServerId> assignments = new HashMap<>();
  Map<TabletId,TabletServerId> unassigned = new HashMap<>();
  for (List<TabletId> extents : tableTablets.values()) {
    for (TabletId tabletId : extents) {
      unassigned.put(tabletId, null);
    }
  }
  this.getAssignments(
      new AssignmentParamsImpl(Collections.unmodifiableSortedMap(allTabletServers),
          Collections.unmodifiableMap(unassigned), assignments));
  assertEquals(15, assignments.size());
  // Ensure unique tservers: a Set detects duplicates in O(n) instead of comparing every
  // pair of entries.
  Set<TabletServerId> seen = new HashSet<>();
  for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
    if (!seen.add(e.getValue())) {
      fail("Assignment failure");
    }
    // Ensure assignments are correct
    if (!tabletInBounds(e.getKey(), e.getValue())) {
      fail("tablet not in bounds: " + e.getKey() + " -> " + e.getValue().getHost());
    }
  }
}
/** With nothing unassigned, the balancer must not produce any assignments. */
@Test
public void testAllAssigned() {
  init(DEFAULT_TABLE_PROPERTIES);
  Map<TabletId,TabletServerId> newAssignments = new HashMap<>();
  this.getAssignments(new AssignmentParamsImpl(
      Collections.unmodifiableSortedMap(allTabletServers), Map.of(), newAssignments));
  assertEquals(0, newAssignments.size());
}
@Test
public void testPartiallyAssigned() {
init(DEFAULT_TABLE_PROPERTIES);
Map<TabletId,TabletServerId> assignments = new HashMap<>();
Map<TabletId,TabletServerId> unassigned = new HashMap<>();
int i = 0;
for (List<TabletId> tablets : tableTablets.values()) {
for (TabletId tabletId : tablets) {
if ((i % 2) == 0) {
unassigned.put(tabletId, null);
}
i++;
}
}
this.getAssignments(
new AssignmentParamsImpl(Collections.unmodifiableSortedMap(allTabletServers),
Collections.unmodifiableMap(unassigned), assignments));
assertEquals(unassigned.size(), assignments.size());
// Ensure unique tservers
for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
for (Entry<TabletId,TabletServerId> e2 : assignments.entrySet()) {
if (e.getKey().equals(e2.getKey())) {
continue;
}
if (e.getValue().equals(e2.getValue())) {
fail("Assignment failure");
}
}
}
// Ensure assignments are correct
for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
if (!tabletInBounds(e.getKey(), e.getValue())) {
fail("tablet not in bounds: " + e.getKey() + " -> " + e.getValue().getHost());
}
}
}
@Test
public void testUnassignedWithNoTServers() {
init(DEFAULT_TABLE_PROPERTIES);
Map<TabletId,TabletServerId> assignments = new HashMap<>();
Map<TabletId,TabletServerId> unassigned = new HashMap<>();
for (TabletId tabletId : tableTablets.get(BAR.getTableName())) {
unassigned.put(tabletId, null);
}
SortedMap<TabletServerId,TServerStatus> current = createCurrent(15);
// Remove the BAR tablet servers from current
List<TabletServerId> removals = new ArrayList<>();
for (Entry<TabletServerId,TServerStatus> e : current.entrySet()) {
if (e.getKey().getHost().equals("192.168.0.6") || e.getKey().getHost().equals("192.168.0.7")
|| e.getKey().getHost().equals("192.168.0.8")
|| e.getKey().getHost().equals("192.168.0.9")
|| e.getKey().getHost().equals("192.168.0.10")) {
removals.add(e.getKey());
}
}
for (TabletServerId r : removals) {
current.remove(r);
}
this.getAssignments(new AssignmentParamsImpl(Collections.unmodifiableSortedMap(current),
Collections.unmodifiableMap(unassigned), assignments));
assertEquals(unassigned.size(), assignments.size());
// Ensure assignments are correct
// Ensure tablets are assigned in default pool
for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
if (tabletInBounds(e.getKey(), e.getValue())) {
fail("tablet unexpectedly in bounds: " + e.getKey() + " -> " + e.getValue().getHost());
}
}
}
@Test
public void testUnassignedWithNoDefaultPool() {
init(DEFAULT_TABLE_PROPERTIES);
Map<TabletId,TabletServerId> assignments = new HashMap<>();
Map<TabletId,TabletServerId> unassigned = new HashMap<>();
for (TabletId tabletId : tableTablets.get(BAR.getTableName())) {
unassigned.put(tabletId, null);
}
SortedMap<TabletServerId,TServerStatus> current = createCurrent(15);
// Remove the BAR tablet servers and default pool from current
List<TabletServerId> removals = new ArrayList<>();
for (Entry<TabletServerId,TServerStatus> e : current.entrySet()) {
if (e.getKey().getHost().equals("192.168.0.6") || e.getKey().getHost().equals("192.168.0.7")
|| e.getKey().getHost().equals("192.168.0.8")
|| e.getKey().getHost().equals("192.168.0.9")
|| e.getKey().getHost().equals("192.168.0.10")
|| e.getKey().getHost().equals("192.168.0.11")
|| e.getKey().getHost().equals("192.168.0.12")
|| e.getKey().getHost().equals("192.168.0.13")
|| e.getKey().getHost().equals("192.168.0.14")
|| e.getKey().getHost().equals("192.168.0.15")) {
removals.add(e.getKey());
}
}
for (TabletServerId r : removals) {
current.remove(r);
}
this.getAssignments(new AssignmentParamsImpl(Collections.unmodifiableSortedMap(current),
Collections.unmodifiableMap(unassigned), assignments));
assertEquals(unassigned.size(), assignments.size());
// Ensure tablets are assigned in default pool
for (Entry<TabletId,TabletServerId> e : assignments.entrySet()) {
if (tabletInBounds(e.getKey(), e.getValue())) {
fail("tablet unexpectedly in bounds: " + e.getKey() + " -> " + e.getValue().getHost());
}
}
}
@Test
public void testOutOfBoundsTablets() {
init(DEFAULT_TABLE_PROPERTIES);
// Wait to trigger the out of bounds check which will call our version of
// getOnlineTabletsForTable
UtilWaitThread.sleep(11000);
Set<TabletId> migrations = new HashSet<>();
List<TabletMigration> migrationsOut = new ArrayList<>();
this.balance(new BalanceParamsImpl(createCurrent(15), migrations, migrationsOut));
assertEquals(2, migrationsOut.size());
}
@Override
public List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tserver, TableId tableId) {
// Report incorrect information so that balance will create an assignment
List<TabletStatistics> tablets = new ArrayList<>();
if (tableId.equals(BAR.getId()) && tserver.getHost().equals("192.168.0.1")) {
// Report that we have a bar tablet on this server
TKeyExtent tke = new TKeyExtent();
tke.setTable(BAR.getId().canonical().getBytes(UTF_8));
tke.setEndRow("11".getBytes());
tke.setPrevEndRow("10".getBytes());
TabletStats tstats = new TabletStats();
tstats.setExtent(tke);
TabletStatistics ts = new TabletStatisticsImpl(tstats);
tablets.add(ts);
} else if (tableId.equals(FOO.getId()) && tserver.getHost().equals("192.168.0.6")) {
// Report that we have a foo tablet on this server
TKeyExtent tke = new TKeyExtent();
tke.setTable(FOO.getId().canonical().getBytes(UTF_8));
tke.setEndRow("1".getBytes());
tke.setPrevEndRow("0".getBytes());
TabletStats tstats = new TabletStats();
tstats.setExtent(tke);
TabletStatistics ts = new TabletStatisticsImpl(tstats);
tablets.add(ts);
}
return tablets;
}
}
| 9,380 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/balancer/SimpleLoadBalancerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.fail;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.manager.balancer.TServerStatusImpl;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.manager.balancer.TabletStatisticsImpl;
import org.apache.accumulo.core.manager.thrift.TableInfo;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.spi.balancer.data.TabletStatistics;
import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class SimpleLoadBalancerTest {
static class FakeTServer {
List<TabletId> tablets = new ArrayList<>();
TServerStatus getStatus() {
org.apache.accumulo.core.manager.thrift.TabletServerStatus result =
new org.apache.accumulo.core.manager.thrift.TabletServerStatus();
result.tableMap = new HashMap<>();
for (TabletId tabletId : tablets) {
TableInfo info = result.tableMap.get(tabletId.getTable().canonical());
if (info == null) {
result.tableMap.put(tabletId.getTable().canonical(), info = new TableInfo());
}
info.onlineTablets++;
info.recs = info.onlineTablets;
info.ingestRate = 123.;
info.queryRate = 456.;
}
return new TServerStatusImpl(result);
}
}
Map<TabletServerId,FakeTServer> servers = new HashMap<>();
Map<TabletId,TabletServerId> last = new HashMap<>();
class TestSimpleLoadBalancer extends SimpleLoadBalancer {
@Override
protected List<TabletStatistics> getOnlineTabletsForTable(TabletServerId tserver,
TableId tableId) {
List<TabletStatistics> result = new ArrayList<>();
for (TabletId tabletId : servers.get(tserver).tablets) {
if (tabletId.getTable().equals(tableId)) {
KeyExtent extent = new KeyExtent(tableId, tabletId.getEndRow(), tabletId.getPrevEndRow());
TabletStats stats =
new TabletStats(new TabletStats(extent.toThrift(), null, null, null, 0L, 0., 0., 0));
result.add(new TabletStatisticsImpl(stats));
}
}
return result;
}
}
@BeforeEach
public void setUp() {
last.clear();
servers.clear();
}
@Test
public void testAssignMigrations() {
servers.put(new TabletServerIdImpl("127.0.0.1", 1234, "a"), new FakeTServer());
servers.put(new TabletServerIdImpl("127.0.0.2", 1234, "b"), new FakeTServer());
servers.put(new TabletServerIdImpl("127.0.0.3", 1234, "c"), new FakeTServer());
List<TabletId> metadataTable = new ArrayList<>();
String table = "t1";
metadataTable.add(makeTablet(table, null, null));
table = "t2";
metadataTable.add(makeTablet(table, "a", null));
metadataTable.add(makeTablet(table, null, "a"));
table = "t3";
metadataTable.add(makeTablet(table, "a", null));
metadataTable.add(makeTablet(table, "b", "a"));
metadataTable.add(makeTablet(table, "c", "b"));
metadataTable.add(makeTablet(table, "d", "c"));
metadataTable.add(makeTablet(table, "e", "d"));
metadataTable.add(makeTablet(table, null, "e"));
Collections.sort(metadataTable);
TestSimpleLoadBalancer balancer = new TestSimpleLoadBalancer();
SortedMap<TabletServerId,TServerStatus> current = new TreeMap<>();
for (Entry<TabletServerId,FakeTServer> entry : servers.entrySet()) {
current.put(entry.getKey(), entry.getValue().getStatus());
}
assignTablets(metadataTable, servers, current, balancer);
// Verify that the counts on the tables are correct
Map<String,Integer> expectedCounts = new HashMap<>();
expectedCounts.put("t1", 1);
expectedCounts.put("t2", 1);
expectedCounts.put("t3", 2);
checkBalance(metadataTable, servers, expectedCounts);
// Rebalance once
for (Entry<TabletServerId,FakeTServer> entry : servers.entrySet()) {
current.put(entry.getKey(), entry.getValue().getStatus());
}
// Nothing should happen, we are balanced
ArrayList<TabletMigration> out = new ArrayList<>();
balancer.getMigrations(current, out);
assertEquals(out.size(), 0);
// Take down a tabletServer
TabletServerId first = current.keySet().iterator().next();
current.remove(first);
FakeTServer remove = servers.remove(first);
// reassign offline extents
assignTablets(remove.tablets, servers, current, balancer);
checkBalance(metadataTable, servers, null);
}
private void assignTablets(List<TabletId> metadataTable, Map<TabletServerId,FakeTServer> servers,
SortedMap<TabletServerId,TServerStatus> status, TestSimpleLoadBalancer balancer) {
// Assign tablets
for (TabletId tabletId : metadataTable) {
TabletServerId assignment = balancer.getAssignment(status, last.get(tabletId));
assertNotNull(assignment);
assertFalse(servers.get(assignment).tablets.contains(tabletId));
servers.get(assignment).tablets.add(tabletId);
last.put(tabletId, assignment);
}
}
SortedMap<TabletServerId,TServerStatus> getAssignments(Map<TabletServerId,FakeTServer> servers) {
SortedMap<TabletServerId,TServerStatus> result = new TreeMap<>();
for (Entry<TabletServerId,FakeTServer> entry : servers.entrySet()) {
result.put(entry.getKey(), entry.getValue().getStatus());
}
return result;
}
@Test
public void testUnevenAssignment() {
for (char c : "abcdefghijklmnopqrstuvwxyz".toCharArray()) {
String cString = Character.toString(c);
TabletServerId tsid = new TabletServerIdImpl("127.0.0.1", c, cString);
FakeTServer fakeTServer = new FakeTServer();
servers.put(tsid, fakeTServer);
fakeTServer.tablets.add(makeTablet(cString, null, null));
}
// Put more tablets on one server, but not more than the number of servers
Entry<TabletServerId,FakeTServer> first = servers.entrySet().iterator().next();
first.getValue().tablets.add(makeTablet("newTable", "a", null));
first.getValue().tablets.add(makeTablet("newTable", "b", "a"));
first.getValue().tablets.add(makeTablet("newTable", "c", "b"));
first.getValue().tablets.add(makeTablet("newTable", "d", "c"));
first.getValue().tablets.add(makeTablet("newTable", "e", "d"));
first.getValue().tablets.add(makeTablet("newTable", "f", "e"));
first.getValue().tablets.add(makeTablet("newTable", "g", "f"));
first.getValue().tablets.add(makeTablet("newTable", "h", "g"));
first.getValue().tablets.add(makeTablet("newTable", "i", null));
TestSimpleLoadBalancer balancer = new TestSimpleLoadBalancer();
Set<TabletId> migrations = Collections.emptySet();
int moved = 0;
// balance until we can't balance no more!
while (true) {
List<TabletMigration> migrationsOut = new ArrayList<>();
balancer.balance(new BalanceParamsImpl(getAssignments(servers), migrations, migrationsOut));
if (migrationsOut.isEmpty()) {
break;
}
for (TabletMigration migration : migrationsOut) {
if (servers.get(migration.getOldTabletServer()).tablets.remove(migration.getTablet())) {
moved++;
}
servers.get(migration.getNewTabletServer()).tablets.add(migration.getTablet());
}
}
assertEquals(8, moved);
}
@Test
public void testUnevenAssignment2() {
// make 26 servers
for (char c : "abcdefghijklmnopqrstuvwxyz".toCharArray()) {
TabletServerId tsid = new TabletServerIdImpl("127.0.0.1", c, Character.toString(c));
FakeTServer fakeTServer = new FakeTServer();
servers.put(tsid, fakeTServer);
}
// put 60 tablets on 25 of them
List<Entry<TabletServerId,FakeTServer>> shortList = new ArrayList<>(servers.entrySet());
Entry<TabletServerId,FakeTServer> shortServer = shortList.remove(0);
int c = 0;
for (int i = 0; i < 60; i++) {
for (Entry<TabletServerId,FakeTServer> entry : shortList) {
entry.getValue().tablets.add(makeTablet("t" + c, null, null));
}
}
// put 10 on the that short server:
for (int i = 0; i < 10; i++) {
shortServer.getValue().tablets.add(makeTablet("s" + i, null, null));
}
TestSimpleLoadBalancer balancer = new TestSimpleLoadBalancer();
Set<TabletId> migrations = Collections.emptySet();
int moved = 0;
// balance until we can't balance no more!
while (true) {
List<TabletMigration> migrationsOut = new ArrayList<>();
balancer.balance(new BalanceParamsImpl(getAssignments(servers), migrations, migrationsOut));
if (migrationsOut.isEmpty()) {
break;
}
for (TabletMigration migration : migrationsOut) {
if (servers.get(migration.getOldTabletServer()).tablets.remove(migration.getTablet())) {
moved++;
}
last.remove(migration.getTablet());
servers.get(migration.getNewTabletServer()).tablets.add(migration.getTablet());
last.put(migration.getTablet(), migration.getNewTabletServer());
}
}
// average is 58, with 2 at 59: we need 48 more moved to the short server
assertEquals(48, moved);
}
private void checkBalance(List<TabletId> metadataTable, Map<TabletServerId,FakeTServer> servers,
Map<String,Integer> expectedCounts) {
// Verify they are spread evenly over the cluster
int average = metadataTable.size() / servers.size();
for (FakeTServer server : servers.values()) {
int diff = server.tablets.size() - average;
if (diff < 0) {
fail("average number of tablets is " + average + " but a server has "
+ server.tablets.size());
}
if (diff > 1) {
fail("average number of tablets is " + average + " but a server has "
+ server.tablets.size());
}
}
if (expectedCounts != null) {
for (FakeTServer server : servers.values()) {
Map<String,Integer> counts = new HashMap<>();
server.tablets.forEach(tabletId -> {
String t = tabletId.getTable().canonical();
counts.putIfAbsent(t, 0);
counts.put(t, counts.get(t) + 1);
});
counts.forEach((k, v) -> assertEquals(expectedCounts.get(k), v));
}
}
}
private static TabletId makeTablet(String table, String end, String prev) {
return new TabletIdImpl(new KeyExtent(TableId.of(table), toText(end), toText(prev)));
}
private static Text toText(String value) {
if (value != null) {
return new Text(value);
}
return null;
}
}
| 9,381 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/balancer/GroupBalancerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.balancer;
import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.TabletIdImpl;
import org.apache.accumulo.core.manager.balancer.BalanceParamsImpl;
import org.apache.accumulo.core.manager.balancer.TServerStatusImpl;
import org.apache.accumulo.core.manager.balancer.TabletServerIdImpl;
import org.apache.accumulo.core.spi.balancer.data.TServerStatus;
import org.apache.accumulo.core.spi.balancer.data.TabletMigration;
import org.apache.accumulo.core.spi.balancer.data.TabletServerId;
import org.apache.accumulo.core.util.MapCounter;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
/**
 * Tests {@link GroupBalancer} by simulating tablet servers and tablets in memory and
 * repeatedly balancing until no migrations remain, then asserting per-group balance.
 */
public class GroupBalancerTest {

  // Groups a tablet by the first two characters of its end row (null end row -> null group).
  private static final Function<TabletId,String> partitioner =
      input -> (input == null || input.getEndRow() == null) ? null
          : input.getEndRow().toString().substring(0, 2);

  /** In-memory model of a set of tablet servers and the tablets they host. */
  public static class TabletServers {
    private final Set<TabletServerId> tservers = new HashSet<>();
    private final Map<TabletId,TabletServerId> tabletLocs = new HashMap<>();

    /** Adds servers given as "host:port" strings. */
    public void addTservers(String... locs) {
      for (String loc : locs) {
        int idx = loc.indexOf(':');
        addTserver(loc.substring(0, idx), Integer.parseInt(loc.substring(idx + 1)));
      }
    }

    public void addTserver(String host, int port) {
      tservers.add(new TabletServerIdImpl(host, port, Long.toHexString(6)));
    }

    /** Adds a tablet with the given end row, hosted on the given server. */
    public void addTablet(String er, String host, int port) {
      // Reuse the one server id for both the location map and the server set; the
      // original built two identical instances.
      TabletServerId tsi = new TabletServerIdImpl(host, port, Long.toHexString(6));
      tabletLocs.put(
          new TabletIdImpl(new KeyExtent(TableId.of("b"), er == null ? null : new Text(er), null)),
          tsi);
      tservers.add(tsi);
    }

    public void balance() {
      balance(10000);
    }

    /** Balances with a GroupBalancer wired to this in-memory model. */
    public void balance(final int maxMigrations) {
      GroupBalancer balancer = new GroupBalancer(TableId.of("1")) {
        @Override
        protected Map<TabletId,TabletServerId> getLocationProvider() {
          return tabletLocs;
        }

        @Override
        protected Function<TabletId,String> getPartitioner() {
          return partitioner;
        }

        @Override
        protected long getWaitTime() {
          return 0;
        }

        @Override
        protected int getMaxMigrations() {
          return maxMigrations;
        }
      };
      balance(balancer, maxMigrations);
    }

    /**
     * Applies migrations from the balancer until none are produced, validating each batch
     * against the migration limit and current locations, then checks final balance.
     */
    public void balance(TabletBalancer balancer, int maxMigrations) {
      while (true) {
        Set<TabletId> migrations = new HashSet<>();
        List<TabletMigration> migrationsOut = new ArrayList<>();
        SortedMap<TabletServerId,TServerStatus> current = new TreeMap<>();
        for (TabletServerId tsi : tservers) {
          current.put(tsi, new TServerStatusImpl(
              new org.apache.accumulo.core.manager.thrift.TabletServerStatus()));
        }
        balancer.balance(new BalanceParamsImpl(current, migrations, migrationsOut));
        assertTrue(migrationsOut.size() <= (maxMigrations + 5),
            "Max Migration exceeded " + maxMigrations + " " + migrationsOut.size());
        for (TabletMigration tabletMigration : migrationsOut) {
          assertEquals(tabletLocs.get(tabletMigration.getTablet()),
              tabletMigration.getOldTabletServer());
          assertTrue(tservers.contains(tabletMigration.getNewTabletServer()));
          tabletLocs.put(tabletMigration.getTablet(), tabletMigration.getNewTabletServer());
        }
        if (migrationsOut.isEmpty()) {
          break;
        }
      }
      checkBalance();
    }

    /**
     * Asserts that each server holds between floor(groupCount/servers) and one more than
     * that for every group, and that the extra tablets are spread evenly.
     */
    void checkBalance() {
      MapCounter<String> groupCounts = new MapCounter<>();
      Map<TabletServerId,MapCounter<String>> tserverGroupCounts = new HashMap<>();
      for (Entry<TabletId,TabletServerId> entry : tabletLocs.entrySet()) {
        String group = partitioner.apply(entry.getKey());
        TabletServerId loc = entry.getValue();
        groupCounts.increment(group, 1);
        // computeIfAbsent replaces the original null-check-then-put sequence.
        MapCounter<String> tgc = tserverGroupCounts.computeIfAbsent(loc, k -> new MapCounter<>());
        tgc.increment(group, 1);
      }
      Map<String,Integer> expectedCounts = new HashMap<>();
      int totalExtra = 0;
      for (String group : groupCounts.keySet()) {
        long groupCount = groupCounts.get(group);
        totalExtra += groupCount % tservers.size();
        expectedCounts.put(group, (int) (groupCount / tservers.size()));
      }
      // The number of extra tablets from all groups that each tserver must have.
      int expectedExtra = totalExtra / tservers.size();
      int maxExtraGroups = expectedExtra + ((totalExtra % tservers.size() > 0) ? 1 : 0);
      for (Entry<TabletServerId,MapCounter<String>> entry : tserverGroupCounts.entrySet()) {
        MapCounter<String> tgc = entry.getValue();
        int tserverExtra = 0;
        for (String group : groupCounts.keySet()) {
          assertTrue(tgc.get(group) >= expectedCounts.get(group));
          assertTrue(tgc.get(group) <= expectedCounts.get(group) + 1,
              "Group counts not as expected group:" + group + " actual:" + tgc.get(group)
                  + " expected:" + (expectedCounts.get(group) + 1) + " tserver:" + entry.getKey());
          tserverExtra += tgc.get(group) - expectedCounts.get(group);
        }
        assertTrue(tserverExtra >= expectedExtra);
        assertTrue(tserverExtra <= maxExtraGroups);
      }
    }
  }

  /** Balances one group of tablets across 1-4 starting servers plus three extra servers. */
  @Test
  public void testSingleGroup() {
    String[][] tests = {new String[] {"a", "b", "c", "d"}, new String[] {"a", "b", "c"},
        new String[] {"a", "b", "c", "d", "e"}, new String[] {"a", "b", "c", "d", "e", "f", "g"},
        new String[] {"a", "b", "c", "d", "e", "f", "g", "h"},
        new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i"}, new String[] {"a"}};
    for (String[] suffixes : tests) {
      for (int maxTS = 1; maxTS <= 4; maxTS++) {
        TabletServers tservers = new TabletServers();
        int ts = 0;
        for (String s : suffixes) {
          tservers.addTablet("01" + s, "192.168.1." + ((ts++ % maxTS) + 1), 9997);
        }
        tservers.addTservers("192.168.1.2:9997", "192.168.1.3:9997", "192.168.1.4:9997");
        tservers.balance();
        tservers.balance();
      }
    }
  }

  /** Balances two groups of tablets for every combination of group sizes. */
  @Test
  public void testTwoGroups() {
    String[][] tests = {new String[] {"a", "b", "c", "d"}, new String[] {"a", "b", "c"},
        new String[] {"a", "b", "c", "d", "e"}, new String[] {"a", "b", "c", "d", "e", "f", "g"},
        new String[] {"a", "b", "c", "d", "e", "f", "g", "h"},
        new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i"}, new String[] {"a"}};
    for (String[] suffixes1 : tests) {
      for (String[] suffixes2 : tests) {
        for (int maxTS = 1; maxTS <= 4; maxTS++) {
          TabletServers tservers = new TabletServers();
          int ts = 0;
          for (String s : suffixes1) {
            tservers.addTablet("01" + s, "192.168.1." + ((ts++ % maxTS) + 1), 9997);
          }
          for (String s : suffixes2) {
            tservers.addTablet("02" + s, "192.168.1." + ((ts++ % maxTS) + 1), 9997);
          }
          tservers.addTservers("192.168.1.2:9997", "192.168.1.3:9997", "192.168.1.4:9997");
          tservers.balance();
          tservers.balance();
        }
      }
    }
  }

  /** Balances three groups of tablets for every combination of group sizes. */
  @Test
  public void testThreeGroups() {
    String[][] tests = {new String[] {"a", "b", "c", "d"}, new String[] {"a", "b", "c"},
        new String[] {"a", "b", "c", "d", "e"}, new String[] {"a", "b", "c", "d", "e", "f", "g"},
        new String[] {"a", "b", "c", "d", "e", "f", "g", "h"},
        new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i"}, new String[] {"a"}};
    for (String[] suffixes1 : tests) {
      for (String[] suffixes2 : tests) {
        for (String[] suffixes3 : tests) {
          for (int maxTS = 1; maxTS <= 4; maxTS++) {
            TabletServers tservers = new TabletServers();
            int ts = 0;
            for (String s : suffixes1) {
              tservers.addTablet("01" + s, "192.168.1." + ((ts++ % maxTS) + 1), 9997);
            }
            for (String s : suffixes2) {
              tservers.addTablet("02" + s, "192.168.1." + ((ts++ % maxTS) + 1), 9997);
            }
            for (String s : suffixes3) {
              tservers.addTablet("03" + s, "192.168.1." + ((ts++ % maxTS) + 1), 9997);
            }
            tservers.addTservers("192.168.1.2:9997", "192.168.1.3:9997", "192.168.1.4:9997");
            tservers.balance();
            tservers.balance();
          }
        }
      }
    }
  }

  /** Balances 1-13 groups that each contain exactly one tablet. */
  @Test
  public void testManySingleTabletGroups() {
    for (int numGroups = 1; numGroups <= 13; numGroups++) {
      for (int maxTS = 1; maxTS <= 4; maxTS++) {
        TabletServers tservers = new TabletServers();
        int ts = 0;
        for (int group = 1; group <= numGroups; group++) {
          tservers.addTablet(String.format("%02d:p", group), "192.168.1." + ((ts++ % maxTS) + 1),
              9997);
        }
        tservers.addTservers("192.168.1.2:9997", "192.168.1.3:9997", "192.168.1.4:9997");
        tservers.balance();
        tservers.balance();
      }
    }
  }

  /** Verifies the balancer converges under a range of per-pass migration limits. */
  @Test
  public void testMaxMigrations() {
    for (int max : new int[] {1, 2, 3, 7, 10, 30}) {
      TabletServers tservers = new TabletServers();
      for (int i = 1; i <= 9; i++) {
        tservers.addTablet("01" + i, "192.168.1.1", 9997);
      }
      for (int i = 1; i <= 4; i++) {
        tservers.addTablet("02" + i, "192.168.1.2", 9997);
      }
      for (int i = 1; i <= 5; i++) {
        tservers.addTablet("03" + i, "192.168.1.3", 9997);
      }
      tservers.addTservers("192.168.1.4:9997", "192.168.1.5:9997");
      tservers.balance(max);
    }
  }

  /** Stress test: 60 groups x 241 tablets randomly placed over 250 servers. */
  @Test
  public void bigTest() {
    TabletServers tservers = new TabletServers();
    for (int g = 1; g <= 60; g++) {
      for (int t = 1; t <= 241; t++) {
        tservers.addTablet(String.format("%02d:%d", g, t),
            "192.168.1." + (RANDOM.get().nextInt(249) + 1), 9997);
      }
    }
    for (int i = 1; i <= 250; i++) {
      tservers.addTserver("192.168.1." + i, 9997);
    }
    tservers.balance(1000);
  }

  /** Stress test with randomly sized groups over 250 servers. */
  @Test
  public void bigTest2() {
    TabletServers tservers = new TabletServers();
    for (int g = 1; g <= 60; g++) {
      for (int t = 1; t <= RANDOM.get().nextInt(1000); t++) {
        tservers.addTablet(String.format("%02d:%d", g, t),
            "192.168.1." + (RANDOM.get().nextInt(249) + 1), 9997);
      }
    }
    for (int i = 1; i <= 250; i++) {
      tservers.addTserver("192.168.1." + i, 9997);
    }
    tservers.balance(1000);
  }
}
| 9,382 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooserTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Set;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.common.ServiceEnvironment.Configuration;
import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope;
import org.easymock.EasyMock;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class SpaceAwareVolumeChooserTest {
VolumeChooserEnvironment chooserEnv = null;
ServiceEnvironment serviceEnv = null;
Configuration sysConfig = null;
double free1;
double free2;
int iterations = 1000;
String volumeOne = "hdfs://nn1:8020/apps/accumulo1/tables";
String volumeTwo = "hdfs://nn2:8020/applications/accumulo/tables";
// Different volumes with different paths
Set<String> tableDirs = Set.of(volumeOne, volumeTwo);
int vol1Count = 0;
int vol2Count = 0;
@BeforeEach
public void beforeTest() {
serviceEnv = EasyMock.createMock(ServiceEnvironment.class);
sysConfig = EasyMock.createMock(Configuration.class);
chooserEnv = EasyMock.createMock(VolumeChooserEnvironment.class);
}
private void testSpecificSetup(long percentage1, long percentage2, String cacheDuration,
int timesToCallPreferredVolumeChooser, boolean anyTimes) {
int max = iterations + 1;
int min = 1;
int updatePropertyMax = timesToCallPreferredVolumeChooser + iterations;
if (anyTimes) {
max = iterations + 1;
updatePropertyMax = max + 1;
}
free1 = percentage1 / (double) 100;
free2 = percentage2 / (double) 100;
EasyMock.expect(sysConfig.getCustom(SpaceAwareVolumeChooser.RECOMPUTE_INTERVAL))
.andReturn(cacheDuration).times(1);
EasyMock.expect(sysConfig.getCustom("volume.preferred." + Scope.DEFAULT.name().toLowerCase()))
.andReturn(String.join(",", tableDirs)).times(timesToCallPreferredVolumeChooser);
EasyMock.expect(serviceEnv.getConfiguration()).andReturn(sysConfig).times(1, updatePropertyMax);
EasyMock.expect(chooserEnv.getChooserScope()).andReturn(Scope.DEFAULT).times(min, max * 2);
EasyMock.expect(chooserEnv.getServiceEnv()).andReturn(serviceEnv).times(min, max);
EasyMock.replay(serviceEnv, sysConfig, chooserEnv);
}
@AfterEach
public void afterTest() {
EasyMock.verify(serviceEnv, sysConfig, chooserEnv);
serviceEnv = null;
vol1Count = 0;
vol2Count = 0;
}
@Test
public void testEvenWeightsWithCaching() {
testSpecificSetup(10L, 10L, null, iterations, false);
makeChoices();
assertEquals(iterations / 2.0, vol1Count, iterations / 10.0);
assertEquals(iterations / 2.0, vol2Count, iterations / 10.0);
}
@Test
public void testEvenWeightsNoCaching() {
testSpecificSetup(10L, 10L, "0", iterations, true);
makeChoices();
assertEquals(iterations / 2.0, vol1Count, iterations / 10.0);
assertEquals(iterations / 2.0, vol2Count, iterations / 10.0);
}
@Test
public void testNoFreeSpace() {
testSpecificSetup(0L, 0L, null, 1, false);
assertThrows(IllegalStateException.class, this::makeChoices);
}
@Test
public void testNinetyTen() {
testSpecificSetup(90L, 10L, null, iterations, false);
makeChoices();
assertEquals(iterations * .9, vol1Count, iterations / 10.0);
assertEquals(iterations * .1, vol2Count, iterations / 10.0);
}
@Test
public void testTenNinety() {
testSpecificSetup(10L, 90L, null, iterations, false);
makeChoices();
assertEquals(iterations * .1, vol1Count, iterations / 10.0);
assertEquals(iterations * .9, vol2Count, iterations / 10.0);
}
  @Test
  public void testWithNoCaching() {
    // Skewed weights with caching disabled ("0" recompute interval): the
    // distribution should match the cached case.
    testSpecificSetup(10L, 90L, "0", iterations, true);
    makeChoices();
    assertEquals(iterations * .1, vol1Count, iterations / 10.0);
    assertEquals(iterations * .9, vol2Count, iterations / 10.0);
  }
  /**
   * Runs the chooser {@code iterations} times over the two test volumes and tallies
   * how often each is picked into {@code vol1Count}/{@code vol2Count}. The
   * filesystem free-space query is stubbed so volume one reports {@code free1} and
   * volume two reports {@code free2}; any other URI is a test bug.
   */
  private void makeChoices() {
    SpaceAwareVolumeChooser chooser = new SpaceAwareVolumeChooser() {
      @Override
      protected double getFreeSpace(String uri) {
        // Stub the free-space lookup with the values configured by the test.
        if (uri.equals(volumeOne)) {
          return free1;
        }
        if (uri.equals(volumeTwo)) {
          return free2;
        }
        throw new IllegalArgumentException();
      }
    };
    for (int i = 0; i < iterations; i++) {
      String choice = chooser.choose(chooserEnv, tableDirs);
      if (choice.equals(volumeOne)) {
        vol1Count += 1;
      }
      if (choice.equals(volumeTwo)) {
        vol2Count += 1;
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createStrictMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Optional;
import java.util.Set;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.common.ServiceEnvironment.Configuration;
import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@link PreferredVolumeChooser}, covering volume selection in the
 * TABLE, LOGGER, and INIT scopes, the fallback to the DEFAULT-scope property, and
 * the failure modes: missing configuration, an empty volume set, and configured
 * volumes that are not among the offered options.
 */
public class PreferredVolumeChooserTest {

  // Suffix of the per-table custom property that lists the preferred volumes.
  private static final String TABLE_CUSTOM_SUFFIX = "volume.preferred";

  // Builds the system-level custom property suffix for a scope, e.g.
  // "volume.preferred.default". (Dropped the redundant 'final' modifier: static
  // methods cannot be overridden.)
  private static String getCustomPropertySuffix(Scope scope) {
    // NOTE(review): toLowerCase() uses the default locale; the enum names here are
    // ASCII so behavior is unaffected, but Locale.ROOT would be more defensive.
    return "volume.preferred." + scope.name().toLowerCase();
  }

  // The full set of candidate volumes offered to the chooser in every test.
  private static final Set<String> ALL_OPTIONS = Set.of("1", "2", "3");

  private ServiceEnvironment serviceEnv;
  private Configuration tableConf;
  private Configuration systemConf;
  private PreferredVolumeChooser chooser;

  @BeforeEach
  public void before() {
    // Strict mocks: per-test expectations are recorded in each test method and
    // then replayed; the shared getConfiguration() stubs may be hit any number
    // of times.
    serviceEnv = createStrictMock(ServiceEnvironment.class);
    chooser = new PreferredVolumeChooser();
    tableConf = createStrictMock(Configuration.class);
    systemConf = createStrictMock(Configuration.class);
    expect(serviceEnv.getConfiguration(anyObject())).andReturn(tableConf).anyTimes();
    expect(serviceEnv.getConfiguration()).andReturn(systemConf).anyTimes();
  }

  @AfterEach
  public void after() {
    // Fail if any recorded expectation went unexercised.
    verify(serviceEnv, tableConf, systemConf);
  }

  // Invokes the chooser in TABLE scope for a fixed test table.
  private Set<String> chooseForTable() {
    VolumeChooserEnvironment env = new VolumeChooserEnvironment() {
      @Override
      public Text getEndRow() {
        return null;
      }

      @Override
      public Optional<TableId> getTable() {
        return Optional.of(TableId.of("testTable"));
      }

      @Override
      public Scope getChooserScope() {
        return Scope.TABLE;
      }

      @Override
      public ServiceEnvironment getServiceEnv() {
        return serviceEnv;
      }
    };
    return chooser.getPreferredVolumes(env, ALL_OPTIONS);
  }

  // Invokes the chooser in the given non-table scope (no table id available).
  private Set<String> choose(Scope scope) {
    VolumeChooserEnvironment env = new VolumeChooserEnvironment() {
      @Override
      public Text getEndRow() {
        return null;
      }

      @Override
      public Optional<TableId> getTable() {
        return Optional.empty();
      }

      @Override
      public Scope getChooserScope() {
        return scope;
      }

      @Override
      public ServiceEnvironment getServiceEnv() {
        return serviceEnv;
      }
    };
    return chooser.getPreferredVolumes(env, ALL_OPTIONS);
  }

  // The table-level property, when present, wins outright.
  @Test
  public void testTableScopeUsingTableProperty() {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn("2,1");
    replay(serviceEnv, tableConf, systemConf);
    assertEquals(Set.of("1", "2"), chooseForTable());
  }

  // With no table-level property, the DEFAULT-scope system property is used.
  @Test
  public void testTableScopeUsingDefaultScopeProperty() {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn("3,2").once();
    replay(serviceEnv, tableConf, systemConf);
    assertEquals(Set.of("2", "3"), chooseForTable());
  }

  // No table property and no DEFAULT fallback: the chooser must fail.
  @Test
  public void testTableScopeWithNoConfig() {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn(null).once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, this::chooseForTable);
  }

  // A property that parses to an empty set of volumes is rejected.
  @Test
  public void testTableScopeWithEmptySet() {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(",").once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, this::chooseForTable);
  }

  // A configured volume that is not among the offered options is rejected.
  @Test
  public void testTableScopeWithUnrecognizedVolumes() {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn("4").once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, this::chooseForTable);
  }

  @Test
  public void testLoggerScopeUsingLoggerProperty() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn("2,1").once();
    replay(serviceEnv, tableConf, systemConf);
    assertEquals(Set.of("1", "2"), choose(Scope.LOGGER));
  }

  // LOGGER scope falls back to the DEFAULT-scope property when unset.
  @Test
  public void testLoggerScopeUsingDefaultProperty() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn("3,2").once();
    replay(serviceEnv, tableConf, systemConf);
    assertEquals(Set.of("2", "3"), choose(Scope.LOGGER));
  }

  @Test
  public void testLoggerScopeWithNoConfig() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn(null).once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, () -> choose(Scope.LOGGER));
  }

  @Test
  public void testLoggerScopeWithEmptySet() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(",").once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, () -> choose(Scope.LOGGER));
  }

  @Test
  public void testLoggerScopeWithUnrecognizedVolumes() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn("4").once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, () -> choose(Scope.LOGGER));
  }

  @Test
  public void testInitScopeUsingInitProperty() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.INIT))).andReturn("2,1").once();
    replay(serviceEnv, tableConf, systemConf);
    assertEquals(Set.of("1", "2"), choose(Scope.INIT));
  }

  // INIT scope falls back to the DEFAULT-scope property when unset.
  @Test
  public void testInitScopeUsingDefaultProperty() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.INIT))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn("3,2").once();
    replay(serviceEnv, tableConf, systemConf);
    assertEquals(Set.of("2", "3"), choose(Scope.INIT));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.spi.fs;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createStrictMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.util.Optional;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.spi.common.ServiceEnvironment;
import org.apache.accumulo.core.spi.common.ServiceEnvironment.Configuration;
import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@link DelegatingChooser}, verifying that the correct delegate
 * {@link VolumeChooser} is instantiated from the table-level property, the
 * per-scope system property, or the DEFAULT-scope fallback, and that missing or
 * invalid configuration surfaces as a RuntimeException.
 */
public class DelegatingChooserTest {
  // Suffix of the per-table custom property naming the delegate chooser class.
  private static final String TABLE_CUSTOM_SUFFIX = "volume.chooser";
  // Builds the system-level custom property suffix for a scope,
  // e.g. "volume.chooser.default".
  private static final String getCustomPropertySuffix(Scope scope) {
    return "volume.chooser." + scope.name().toLowerCase();
  }
  private ServiceEnvironment serviceEnv;
  private Configuration tableConf;
  private DelegatingChooser chooser;
  private Configuration systemConf;
  // Two distinct delegate implementations so tests can tell which one was chosen.
  public static class MockChooser1 extends RandomVolumeChooser {}
  public static class MockChooser2 extends RandomVolumeChooser {}
  @BeforeEach
  public void before() {
    // Strict mocks; the shared getConfiguration() stubs may be hit any number
    // of times, while per-test expectations are recorded in each test method.
    serviceEnv = createStrictMock(ServiceEnvironment.class);
    chooser = new DelegatingChooser();
    tableConf = createStrictMock(Configuration.class);
    systemConf = createStrictMock(Configuration.class);
    expect(serviceEnv.getConfiguration(anyObject())).andReturn(tableConf).anyTimes();
    expect(serviceEnv.getConfiguration()).andReturn(systemConf).anyTimes();
  }
  @AfterEach
  public void after() {
    // Fail if any recorded expectation went unexercised.
    verify(serviceEnv, tableConf, systemConf);
  }
  // Resolves the delegate chooser in TABLE scope for a fixed test table.
  private VolumeChooser getTableDelegate() {
    VolumeChooserEnvironment env = new VolumeChooserEnvironment() {
      @Override
      public Text getEndRow() {
        return null;
      }
      @Override
      public Optional<TableId> getTable() {
        return Optional.of(TableId.of("testTable"));
      }
      @Override
      public Scope getChooserScope() {
        return Scope.TABLE;
      }
      @Override
      public ServiceEnvironment getServiceEnv() {
        return serviceEnv;
      }
    };
    return chooser.getDelegateChooser(env);
  }
  // Resolves the delegate chooser in the given non-table scope (no table id).
  private VolumeChooser getDelegate(Scope scope) {
    VolumeChooserEnvironment env = new VolumeChooserEnvironment() {
      @Override
      public Text getEndRow() {
        return null;
      }
      @Override
      public Optional<TableId> getTable() {
        return Optional.empty();
      }
      @Override
      public Scope getChooserScope() {
        return scope;
      }
      @Override
      public ServiceEnvironment getServiceEnv() {
        return serviceEnv;
      }
    };
    return chooser.getDelegateChooser(env);
  }
  // The table-level property, when present, selects the delegate directly.
  @Test
  public void testTableScopeUsingTableProperty() throws Exception {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(MockChooser1.class.getName());
    expect(serviceEnv.instantiate(TableId.of("testTable"), MockChooser1.class.getName(),
        VolumeChooser.class)).andReturn(new MockChooser1());
    replay(serviceEnv, tableConf, systemConf);
    VolumeChooser delegate = getTableDelegate();
    assertSame(MockChooser1.class, delegate.getClass());
  }
  // With no table-level property, the DEFAULT-scope system property is used.
  @Test
  public void testTableScopeUsingDefaultScopeProperty() throws Exception {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT)))
        .andReturn(MockChooser2.class.getName()).once();
    expect(serviceEnv.instantiate(TableId.of("testTable"), MockChooser2.class.getName(),
        VolumeChooser.class)).andReturn(new MockChooser2());
    replay(serviceEnv, tableConf, systemConf);
    VolumeChooser delegate = getTableDelegate();
    assertSame(MockChooser2.class, delegate.getClass());
  }
  // No table property and no DEFAULT fallback: resolution must fail.
  @Test
  public void testTableScopeWithNoConfig() {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn(null).once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, this::getTableDelegate);
  }
  // A delegate class name that fails to instantiate propagates as a failure.
  @Test
  public void testTableScopeWithBadDelegate() throws Exception {
    expect(tableConf.getTableCustom(TABLE_CUSTOM_SUFFIX)).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT)))
        .andReturn("not a valid class name").once();
    expect(serviceEnv.instantiate(TableId.of("testTable"), "not a valid class name",
        VolumeChooser.class)).andThrow(new RuntimeException());
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, this::getTableDelegate);
  }
  @Test
  public void testLoggerScopeUsingLoggerProperty() throws Exception {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER)))
        .andReturn(MockChooser1.class.getName()).once();
    expect(serviceEnv.instantiate(MockChooser1.class.getName(), VolumeChooser.class))
        .andReturn(new MockChooser1());
    replay(serviceEnv, tableConf, systemConf);
    VolumeChooser delegate = getDelegate(Scope.LOGGER);
    assertSame(MockChooser1.class, delegate.getClass());
  }
  // LOGGER scope falls back to the DEFAULT-scope property when unset.
  @Test
  public void testLoggerScopeUsingDefaultProperty() throws Exception {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT)))
        .andReturn(MockChooser2.class.getName()).once();
    expect(serviceEnv.instantiate(MockChooser2.class.getName(), VolumeChooser.class))
        .andReturn(new MockChooser2());
    replay(serviceEnv, tableConf, systemConf);
    VolumeChooser delegate = getDelegate(Scope.LOGGER);
    assertSame(MockChooser2.class, delegate.getClass());
  }
  @Test
  public void testLoggerScopeWithNoConfig() {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT))).andReturn(null).once();
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, () -> getDelegate(Scope.LOGGER));
  }
  @Test
  public void testLoggerScopeWithBadDelegate() throws Exception {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.LOGGER))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT)))
        .andReturn("not a valid class name").once();
    expect(serviceEnv.instantiate("not a valid class name", VolumeChooser.class))
        .andThrow(new RuntimeException());
    replay(serviceEnv, tableConf, systemConf);
    assertThrows(RuntimeException.class, () -> getDelegate(Scope.LOGGER));
  }
  @Test
  public void testInitScopeUsingInitProperty() throws Exception {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.INIT)))
        .andReturn(MockChooser1.class.getName()).once();
    expect(serviceEnv.instantiate(MockChooser1.class.getName(), VolumeChooser.class))
        .andReturn(new MockChooser1());
    replay(serviceEnv, tableConf, systemConf);
    VolumeChooser delegate = getDelegate(Scope.INIT);
    assertSame(MockChooser1.class, delegate.getClass());
  }
  // INIT scope falls back to the DEFAULT-scope property when unset.
  @Test
  public void testInitScopeUsingDefaultProperty() throws Exception {
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.INIT))).andReturn(null).once();
    expect(systemConf.getCustom(getCustomPropertySuffix(Scope.DEFAULT)))
        .andReturn(MockChooser2.class.getName()).once();
    expect(serviceEnv.instantiate(MockChooser2.class.getName(), VolumeChooser.class))
        .andReturn(new MockChooser2());
    replay(serviceEnv, tableConf, systemConf);
    VolumeChooser delegate = getDelegate(Scope.INIT);
    assertSame(MockChooser2.class, delegate.getClass());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Comparator;
import org.apache.accumulo.core.data.Condition;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.junit.jupiter.api.Test;
/**
 * Verifies the ordering imposed by {@link ConditionalWriterImpl#CONDITION_COMPARATOR}
 * over {@link Condition}s differing in family, qualifier, timestamp, and visibility.
 */
public class ConditionalComparatorTest {
  @Test
  public void testComparator() {
    Comparator<Condition> cmp = ConditionalWriterImpl.CONDITION_COMPARATOR;

    Condition base = new Condition("a", "b");
    Condition laterQual = new Condition("a", "c");
    Condition laterFam = new Condition("b", "c");
    Condition ts5 = new Condition("a", "b").setTimestamp(5);
    Condition ts6 = new Condition("a", "b").setTimestamp(6);
    Condition visAB = new Condition("a", "b").setVisibility(new ColumnVisibility("A&B"));
    Condition visAC = new Condition("a", "b").setVisibility(new ColumnVisibility("A&C"));

    // A condition compares equal to itself.
    assertEquals(0, cmp.compare(base, base));
    // Differing qualifiers order the conditions (and the ordering is symmetric).
    assertTrue(cmp.compare(base, laterQual) < 0);
    assertTrue(cmp.compare(laterQual, base) > 0);
    // Differing families order the conditions.
    assertTrue(cmp.compare(base, laterFam) < 0);
    assertTrue(cmp.compare(laterFam, base) > 0);
    // A condition with no timestamp sorts before one with a timestamp set.
    assertTrue(cmp.compare(base, ts5) < 0);
    assertTrue(cmp.compare(ts5, base) > 0);
    // Between timestamped conditions, the larger timestamp sorts first.
    assertTrue(cmp.compare(ts6, ts5) < 0);
    assertTrue(cmp.compare(ts5, ts6) > 0);
    // No visibility sorts before a set visibility.
    assertTrue(cmp.compare(base, visAC) < 0);
    assertTrue(cmp.compare(visAC, base) > 0);
    // Differing visibilities order the conditions.
    assertTrue(cmp.compare(visAB, visAC) < 0);
    assertTrue(cmp.compare(visAC, visAB) > 0);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.HOURS;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
import org.apache.accumulo.core.securityImpl.thrift.TDelegationTokenConfig;
import org.junit.jupiter.api.Test;
/**
 * Round-trip test for {@link DelegationTokenConfigSerializer}: a config serialized
 * to its thrift form must carry the same lifetime (in milliseconds) and must
 * deserialize back to an object equal to the original.
 */
public class DelegationTokenConfigSerializerTest {
  @Test
  public void test() {
    DelegationTokenConfig original = new DelegationTokenConfig();
    original.setTokenLifetime(8323, HOURS);

    TDelegationTokenConfig thriftForm = DelegationTokenConfigSerializer.serialize(original);

    // The thrift form stores the lifetime in milliseconds.
    assertEquals(thriftForm.getLifetime(), original.getTokenLifetime(MILLISECONDS));
    // Deserializing yields a config equal to the one we started with.
    assertEquals(original, DelegationTokenConfigSerializer.deserialize(thriftForm));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.io.File;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.ConditionalWriterConfig;
import org.apache.accumulo.core.client.Durability;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.ConfigurationTypeHelper;
import org.apache.accumulo.core.conf.Property;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
/**
 * Tests for client-side configuration handling: loading sensitive properties from a
 * JCEKS credential provider, and translating client {@link Properties} into
 * {@link BatchWriterConfig} and {@link ConditionalWriterConfig} via
 * {@link ClientContext}.
 */
public class ClientContextTest {

  private static final String keystoreName = "/site-cfg.jceks";

  // site-cfg.jceks={'ignored.property'=>'ignored', 'instance.secret'=>'mysecret',
  // 'general.rpc.timeout'=>'timeout'}
  private static File keystore;

  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
      justification = "provided keystoreUrl path isn't user provided")
  @BeforeAll
  public static void setUpBeforeAll() {
    URL keystoreUrl = ClientContextTest.class.getResource(keystoreName);
    assertNotNull(keystoreUrl, "Could not find " + keystoreName);
    keystore = new File(keystoreUrl.getFile());
  }

  // Builds a Hadoop credential-provider URL ("jceks://file...") for a local file.
  protected String getKeyStoreUrl(File absoluteFilePath) {
    return "jceks://file" + absoluteFilePath.getAbsolutePath();
  }

  @Test
  public void loadSensitivePropertyFromCredentialProvider() {
    String absPath = getKeyStoreUrl(keystore);
    Properties props = new Properties();
    props.setProperty(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), absPath);
    AccumuloConfiguration accClientConf = ClientConfConverter.toAccumuloConf(props);
    // The secret is read from the keystore rather than the property defaults.
    assertEquals("mysecret", accClientConf.get(Property.INSTANCE_SECRET));
  }

  @Test
  public void defaultValueForSensitiveProperty() {
    // Without a credential provider configured, the default value applies.
    Properties props = new Properties();
    AccumuloConfiguration accClientConf = ClientConfConverter.toAccumuloConf(props);
    assertEquals(Property.INSTANCE_SECRET.getDefaultValue(),
        accClientConf.get(Property.INSTANCE_SECRET));
  }

  @Test
  public void sensitivePropertiesIncludedInProperties() {
    String absPath = getKeyStoreUrl(keystore);
    Properties clientProps = new Properties();
    clientProps.setProperty(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), absPath);
    AccumuloConfiguration accClientConf = ClientConfConverter.toAccumuloConf(clientProps);
    Map<String,String> props = new HashMap<>();
    accClientConf.getProperties(props, x -> true);
    // Only sensitive properties are added
    assertEquals(Property.GENERAL_RPC_TIMEOUT.getDefaultValue(),
        props.get(Property.GENERAL_RPC_TIMEOUT.getKey()));
    // Only known properties are added
    assertFalse(props.containsKey("ignored.property"));
    assertEquals("mysecret", props.get(Property.INSTANCE_SECRET.getKey()));
  }

  @Test
  public void testGetBatchWriterConfigUsingDefaults() {
    Properties props = new Properties();
    BatchWriterConfig batchWriterConfig = ClientContext.getBatchWriterConfig(props);
    assertNotNull(batchWriterConfig);

    long expectedMemory = ConfigurationTypeHelper
        .getMemoryAsBytes(ClientProperty.BATCH_WRITER_MEMORY_MAX.getDefaultValue());
    assertEquals(expectedMemory, batchWriterConfig.getMaxMemory());

    // If the value of BATCH_WRITE_LATENCY_MAX or BATCH_WRITER_TIMEOUT_MAX, is set to zero,
    // Long.MAX_VALUE is returned. Effectively, this will cause data to be held in memory
    // indefinitely for BATCH_WRITE_LATENCY_MAX and for no timeout, for BATCH_WRITER_TIMEOUT_MAX.
    long expectedLatency = ConfigurationTypeHelper
        .getTimeInMillis(ClientProperty.BATCH_WRITER_LATENCY_MAX.getDefaultValue());
    // default latency should be 120000 ms
    assertEquals(120000L, expectedLatency);
    assertEquals(expectedLatency, batchWriterConfig.getMaxLatency(MILLISECONDS));

    long expectedTimeout = ConfigurationTypeHelper
        .getTimeInMillis(ClientProperty.BATCH_WRITER_TIMEOUT_MAX.getDefaultValue());
    if (expectedTimeout == 0) {
      expectedTimeout = Long.MAX_VALUE;
    }
    // Argument order fixed: the expected constant comes first in
    // assertEquals(expected, actual) so failure messages read correctly.
    assertEquals(Long.MAX_VALUE, expectedTimeout);
    assertEquals(expectedTimeout, batchWriterConfig.getTimeout(MILLISECONDS));

    int expectedThreads =
        Integer.parseInt(ClientProperty.BATCH_WRITER_THREADS_MAX.getDefaultValue());
    assertEquals(expectedThreads, batchWriterConfig.getMaxWriteThreads());

    Durability expectedDurability =
        Durability.valueOf(ClientProperty.BATCH_WRITER_DURABILITY.getDefaultValue().toUpperCase());
    assertEquals(expectedDurability, batchWriterConfig.getDurability());
  }

  @Test
  public void testGetBatchWriterConfigNotUsingDefaults() {
    Properties props = new Properties();

    // set properties to non-default values
    props.setProperty(ClientProperty.BATCH_WRITER_MEMORY_MAX.getKey(), "10M");
    props.setProperty(ClientProperty.BATCH_WRITER_LATENCY_MAX.getKey(), "40");
    props.setProperty(ClientProperty.BATCH_WRITER_TIMEOUT_MAX.getKey(), "15");
    props.setProperty(ClientProperty.BATCH_WRITER_THREADS_MAX.getKey(), "12");
    props.setProperty(ClientProperty.BATCH_WRITER_DURABILITY.getKey(), Durability.FLUSH.name());

    BatchWriterConfig batchWriterConfig = ClientContext.getBatchWriterConfig(props);
    assertNotNull(batchWriterConfig);

    long expectedMemory = ConfigurationTypeHelper
        .getMemoryAsBytes(ClientProperty.BATCH_WRITER_MEMORY_MAX.getValue(props));
    assertEquals(expectedMemory, batchWriterConfig.getMaxMemory());

    // Bare numbers are interpreted as seconds by the time parser.
    assertEquals(40, batchWriterConfig.getMaxLatency(SECONDS));
    assertEquals(40000, batchWriterConfig.getMaxLatency(MILLISECONDS));
    assertEquals(15, batchWriterConfig.getTimeout(SECONDS));
    assertEquals(15000, batchWriterConfig.getTimeout(MILLISECONDS));

    long expectedThreads = ClientProperty.BATCH_WRITER_THREADS_MAX.getInteger(props);
    assertEquals(expectedThreads, batchWriterConfig.getMaxWriteThreads());

    Durability expectedDurability =
        Durability.valueOf(ClientProperty.BATCH_WRITER_DURABILITY.getValue(props).toUpperCase());
    assertEquals(expectedDurability, batchWriterConfig.getDurability());
  }

  @Test
  public void testGetConditionalWriterConfigUsingDefaults() {
    Properties props = new Properties();
    ConditionalWriterConfig conditionalWriterConfig =
        ClientContext.getConditionalWriterConfig(props);
    assertNotNull(conditionalWriterConfig);

    // If the value of CONDITIONAL_WRITER_TIMEOUT_MAX is set to zero, Long.MAX_VALUE is returned.
    // Effectively, this indicates there is no timeout for CONDITIONAL_WRITER_TIMEOUT_MAX
    long expectedTimeout = ConfigurationTypeHelper
        .getTimeInMillis(ClientProperty.CONDITIONAL_WRITER_TIMEOUT_MAX.getDefaultValue());
    if (expectedTimeout == 0) {
      expectedTimeout = Long.MAX_VALUE;
    }
    // Argument order fixed: expected constant first (see note in the batch writer test).
    assertEquals(Long.MAX_VALUE, expectedTimeout);
    assertEquals(expectedTimeout, conditionalWriterConfig.getTimeout(MILLISECONDS));

    int expectedThreads =
        Integer.parseInt(ClientProperty.CONDITIONAL_WRITER_THREADS_MAX.getDefaultValue());
    assertEquals(expectedThreads, conditionalWriterConfig.getMaxWriteThreads());

    Durability expectedDurability = Durability
        .valueOf(ClientProperty.CONDITIONAL_WRITER_DURABILITY.getDefaultValue().toUpperCase());
    assertEquals(expectedDurability, conditionalWriterConfig.getDurability());
  }

  @Test
  public void testGetConditionalWriterConfigNotUsingDefaults() {
    Properties props = new Properties();

    // set properties to non-default values
    props.setProperty(ClientProperty.CONDITIONAL_WRITER_TIMEOUT_MAX.getKey(), "17");
    props.setProperty(ClientProperty.CONDITIONAL_WRITER_THREADS_MAX.getKey(), "14");
    props.setProperty(ClientProperty.CONDITIONAL_WRITER_DURABILITY.getKey(),
        Durability.SYNC.name());

    ConditionalWriterConfig conditionalWriterConfig =
        ClientContext.getConditionalWriterConfig(props);
    assertNotNull(conditionalWriterConfig);

    assertEquals(17, conditionalWriterConfig.getTimeout(SECONDS));
    assertEquals(17000, conditionalWriterConfig.getTimeout(MILLISECONDS));

    long expectedThreads = ClientProperty.CONDITIONAL_WRITER_THREADS_MAX.getInteger(props);
    assertEquals(expectedThreads, conditionalWriterConfig.getMaxWriteThreads());

    Durability expectedDurability = Durability
        .valueOf(ClientProperty.CONDITIONAL_WRITER_DURABILITY.getValue(props).toUpperCase());
    assertEquals(expectedDurability, conditionalWriterConfig.getDurability());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.clientImpl.TabletLocatorImpl.TabletServerLockChecker;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link RootTabletLocator}'s cache-invalidation behavior against a
 * mocked client context and ZooKeeper cache.
 */
public class RootTabletLocatorTest {
  private ClientContext context;
  private TabletServerLockChecker lockChecker;
  private ZooCache zooCache;
  private RootTabletLocator locator;

  @BeforeEach
  public void setUp() {
    // Mock just enough of the client context for the locator to resolve paths.
    context = createMock(ClientContext.class);
    expect(context.getZooKeeperRoot()).andReturn("/accumulo/iid").anyTimes();
    zooCache = createMock(ZooCache.class);
    expect(context.getZooCache()).andReturn(zooCache).anyTimes();
    replay(context);

    lockChecker = createMock(TabletServerLockChecker.class);
    locator = new RootTabletLocator(lockChecker);
  }

  @Test
  public void testInvalidateCache_Server() {
    // Invalidating a server must clear that server's entry under ZTSERVERS
    // in the ZooKeeper cache.
    zooCache.clear(context.getZooKeeperRoot() + Constants.ZTSERVERS + "/server");
    replay(zooCache);
    locator.invalidateCache(context, "server");
    verify(zooCache);
  }
}
| 9,389 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/TabletServerBatchReaderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.threads.ThreadPools;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class TabletServerBatchReaderTest {
  private ClientContext context;

  @BeforeEach
  public void setup() {
    // The reader's constructor only needs a thread pool from the context.
    context = EasyMock.createMock(ClientContext.class);
    EasyMock.expect(context.threadPools()).andReturn(ThreadPools.getServerThreadPools());
    EasyMock.replay(context);
  }

  @Test
  public void testGetAuthorizations() {
    // The authorizations handed to the constructor must be reported back unchanged.
    Authorizations auths = new Authorizations("a,b");
    try (BatchScanner scanner =
        new TabletServerBatchReader(context, TableId.of("foo"), "fooName", auths, 1)) {
      assertEquals(auths, scanner.getAuthorizations());
    }
  }

  @Test
  public void testNullAuthorizationsFails() {
    // Null authorizations are rejected up front rather than failing later.
    assertThrows(IllegalArgumentException.class,
        () -> new TabletServerBatchReader(context, TableId.of("foo"), "fooName", null, 1));
  }
}
| 9,390 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ThriftTransportKeyTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Properties;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.rpc.SaslConnectionParams;
import org.apache.accumulo.core.rpc.SslConnectionParams;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import com.google.common.net.HostAndPort;
public class ThriftTransportKeyTest {
  private static final String primary = "accumulo";

  @BeforeEach
  public void setup() {
    // Point the JVM at a fake Kerberos realm/KDC so UGI can be switched into
    // kerberos mode without a real KDC being present.
    System.setProperty("java.security.krb5.realm", "accumulo");
    System.setProperty("java.security.krb5.kdc", "fake");
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
  }

  /** Builds SASL connection parameters for the given token using this test's server primary. */
  private static SaslConnectionParams createSaslParams(AuthenticationToken token) {
    Properties clientProps = new Properties();
    clientProps.setProperty(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY.getKey(), primary);
    clientProps.setProperty(ClientProperty.SASL_ENABLED.getKey(), "true");
    return new SaslConnectionParams(clientProps, token);
  }

  @Test
  public void testSslAndSaslErrors() {
    ClientContext ctx = createMock(ClientContext.class);
    SslConnectionParams ssl = createMock(SslConnectionParams.class);
    SaslConnectionParams sasl = createMock(SaslConnectionParams.class);
    expect(ctx.getClientSslParams()).andReturn(ssl).anyTimes();
    expect(ctx.getSaslParams()).andReturn(sasl).anyTimes();
    // the ssl/sasl param mocks themselves are never verified
    replay(ctx);
    try {
      // having both SSL and SASL enabled at once must be rejected
      assertThrows(RuntimeException.class,
          () -> new ThriftTransportKey(HostAndPort.fromParts("localhost", 9999), 120_000, ctx));
    } finally {
      verify(ctx);
    }
  }

  @Test
  public void testConnectionCaching() throws IOException, InterruptedException {
    UserGroupInformation user1 = UserGroupInformation.createUserForTesting("user1", new String[0]);
    final KerberosToken token = EasyMock.createMock(KerberosToken.class);
    // Two independently constructed SASL params for the same user...
    SaslConnectionParams paramsA =
        user1.doAs((PrivilegedExceptionAction<SaslConnectionParams>) () -> createSaslParams(token));
    SaslConnectionParams paramsB =
        user1.doAs((PrivilegedExceptionAction<SaslConnectionParams>) () -> createSaslParams(token));
    ThriftTransportKey keyA =
        new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, paramsA);
    ThriftTransportKey keyB =
        new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, paramsB);
    // ...must yield equal keys and hash codes so cached thrift connections get reused.
    assertEquals(keyA, keyB);
    assertEquals(keyA.hashCode(), keyB.hashCode());
  }

  @Test
  public void testSaslPrincipalIsSignificant() throws IOException, InterruptedException {
    UserGroupInformation user1 = UserGroupInformation.createUserForTesting("user1", new String[0]);
    final KerberosToken token = EasyMock.createMock(KerberosToken.class);
    SaslConnectionParams paramsA =
        user1.doAs((PrivilegedExceptionAction<SaslConnectionParams>) () -> createSaslParams(token));
    UserGroupInformation user2 = UserGroupInformation.createUserForTesting("user2", new String[0]);
    SaslConnectionParams paramsB =
        user2.doAs((PrivilegedExceptionAction<SaslConnectionParams>) () -> createSaslParams(token));
    ThriftTransportKey keyA =
        new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, paramsA);
    ThriftTransportKey keyB =
        new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, paramsB);
    // Keys built under different principals must not collide in the connection cache.
    assertNotEquals(keyA, keyB);
    assertNotEquals(keyA.hashCode(), keyB.hashCode());
  }

  @Test
  public void testSimpleEquivalence() {
    ClientContext ctx = createMock(ClientContext.class);
    expect(ctx.getClientSslParams()).andReturn(null).anyTimes();
    expect(ctx.getSaslParams()).andReturn(null).anyTimes();
    replay(ctx);
    ThriftTransportKey key =
        new ThriftTransportKey(HostAndPort.fromParts("localhost", 9999), 120_000, ctx);
    assertEquals(key, key, "Normal ThriftTransportKey doesn't equal itself");
  }
}
| 9,391 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ScanAttemptsImplTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.apache.accumulo.core.spi.scan.ConfigurableScanServerSelectorTest.nti;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.accumulo.core.data.TabletId;
import org.apache.accumulo.core.spi.scan.ScanServerAttempt;
import org.junit.jupiter.api.Test;
public class ScanAttemptsImplTest {
  /**
   * Collapses each attempt into a {@code "server_RESULT"} string so snapshot maps can be
   * compared with simple literals.
   */
  private Map<TabletId,Collection<String>>
      simplify(Map<TabletId,Collection<ScanServerAttemptImpl>> map) {
    Map<TabletId,Collection<String>> simplified = new HashMap<>();
    for (var entry : map.entrySet()) {
      Set<String> labels = new HashSet<>();
      for (ScanServerAttemptImpl attempt : entry.getValue()) {
        labels.add(attempt.getServer() + "_" + attempt.getResult());
      }
      simplified.put(entry.getKey(), labels);
    }
    return simplified;
  }

  @Test
  public void testBasic() {
    ScanServerAttemptsImpl attempts = new ScanServerAttemptsImpl();

    var emptySnapshot = attempts.snapshot();
    assertEquals(Map.of(), emptySnapshot);

    var tablet1 = nti("1", "a");
    var reporter1 = attempts.createReporter("ss1:1", tablet1);
    reporter1.report(ScanServerAttempt.Result.BUSY);
    // snapshots are point-in-time: earlier ones never see later reports
    assertEquals(Map.of(), emptySnapshot);

    var busySnapshot = attempts.snapshot();
    assertEquals(Map.of(tablet1, Set.of("ss1:1_BUSY")), simplify(busySnapshot));

    reporter1.report(ScanServerAttempt.Result.ERROR);
    assertEquals(Map.of(), emptySnapshot);
    assertEquals(Map.of(tablet1, Set.of("ss1:1_BUSY")), simplify(busySnapshot));

    var errorSnapshot = attempts.snapshot();
    assertEquals(Map.of(tablet1, Set.of("ss1:1_BUSY", "ss1:1_ERROR")), simplify(errorSnapshot));

    // reports for other tablets/servers accumulate independently
    var tablet2 = nti("1", "m");
    var reporter2 = attempts.createReporter("ss1:1", tablet2);
    var tablet3 = nti("2", "r");
    var reporter3 = attempts.createReporter("ss2:2", tablet3);
    reporter2.report(ScanServerAttempt.Result.BUSY);
    reporter3.report(ScanServerAttempt.Result.ERROR);

    var finalSnapshot = attempts.snapshot();
    assertEquals(Map.of(), emptySnapshot);
    assertEquals(Map.of(tablet1, Set.of("ss1:1_BUSY")), simplify(busySnapshot));
    assertEquals(Map.of(tablet1, Set.of("ss1:1_BUSY", "ss1:1_ERROR")), simplify(errorSnapshot));
    assertEquals(Map.of(tablet1, Set.of("ss1:1_BUSY", "ss1:1_ERROR"), tablet2, Set.of("ss1:1_BUSY"),
        tablet3, Set.of("ss2:2_ERROR")), simplify(finalSnapshot));
  }
}
| 9,392 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ZookeeperLockCheckerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ZookeeperLockCheckerTest {
  private ClientContext context;
  private ZooCache zc;
  private ZookeeperLockChecker zklc;

  @BeforeEach
  public void setUp() {
    // Stub a ClientContext that serves a fixed ZooKeeper root and our mock ZooCache.
    zc = createMock(ZooCache.class);
    context = createMock(ClientContext.class);
    expect(context.getZooKeeperRoot()).andReturn("/accumulo/iid").anyTimes();
    expect(context.getZooCache()).andReturn(zc).anyTimes();
    replay(context);
    zklc = new ZookeeperLockChecker(context);
  }

  @Test
  public void testInvalidateCache() {
    // Invalidating a server must clear exactly that server's tserver node from the cache.
    String serverPath = context.getZooKeeperRoot() + Constants.ZTSERVERS + "/server";
    zc.clear(serverPath);
    replay(zc);
    zklc.invalidateCache("server");
    verify(zc);
  }
}
| 9,393 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ThriftScannerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.Test;
/**
 * Test calls for {@link ThriftScanner}.
 */
public class ThriftScannerTest {
  /** Returns true when {@code actual} is within 10% (but at least +/-1) of {@code expected}. */
  private static boolean withinTenPercent(long expected, long actual) {
    long tolerance = Math.max(expected / 10, 1);
    return Math.abs(actual - expected) <= tolerance;
  }

  @Test
  public void testPauseIncrease() throws Exception {
    // starting from 5ms with plenty of headroom, the next pause lands near 10ms
    long nextPause = ThriftScanner.pause(5L, 5000L, false);
    assertTrue(withinTenPercent(10L, nextPause),
        "New pause should be within [9,11], but was " + nextPause);
  }

  @Test
  public void testMaxPause() throws Exception {
    // the configured maximum caps the pause regardless of the current value
    long maxPause = 1L;
    long nextPause = ThriftScanner.pause(5L, maxPause, false);
    assertTrue(withinTenPercent(maxPause, nextPause),
        "New pause should be within [0,2], but was " + nextPause);
  }
}
| 9,394 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ScannerImplTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.security.Authorizations;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class ScannerImplTest {
  private ClientContext context;

  @BeforeEach
  public void setup() {
    context = EasyMock.createMock(ClientContext.class);
  }

  @Test
  public void testValidReadaheadValues() {
    try (var scanner = new ScannerImpl(context, TableId.of("foo"), Authorizations.EMPTY)) {
      // zero, a small value, and the maximum are all legal thresholds
      scanner.setReadaheadThreshold(0);
      scanner.setReadaheadThreshold(10);
      scanner.setReadaheadThreshold(Long.MAX_VALUE);
      assertEquals(Long.MAX_VALUE, scanner.getReadaheadThreshold());
    }
  }

  @Test
  public void testInValidReadaheadValues() {
    try (var scanner = new ScannerImpl(context, TableId.of("foo"), Authorizations.EMPTY)) {
      // negative thresholds are rejected
      assertThrows(IllegalArgumentException.class, () -> scanner.setReadaheadThreshold(-1));
    }
  }

  @Test
  public void testGetAuthorizations() {
    // the authorizations handed to the constructor must be reported back unchanged
    Authorizations auths = new Authorizations("a,b");
    try (var scanner = new ScannerImpl(context, TableId.of("foo"), auths)) {
      assertEquals(auths, scanner.getAuthorizations());
    }
  }

  @Test
  public void testNullAuthorizationsFails() {
    assertThrows(IllegalArgumentException.class,
        () -> new ScannerImpl(context, TableId.of("foo"), null));
  }
}
| 9,395 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ClientConfConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import java.util.Properties;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.ClientProperty;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.junit.jupiter.api.Test;
public class ClientConfConverterTest {
  @Test
  public void testBasic() {
    Properties original = new Properties();
    // these have no AccumuloConfiguration equivalent and are dropped by the conversion
    original.setProperty(ClientProperty.INSTANCE_NAME.getKey(), "instance");
    ClientProperty.setPassword(original, "mypass");
    original.setProperty(ClientProperty.BATCH_WRITER_THREADS_MAX.getKey(), "5");
    // these map to AccumuloConfiguration properties and should survive the round trip
    original.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), "zookeepers");
    original.setProperty(ClientProperty.INSTANCE_ZOOKEEPERS_TIMEOUT.getKey(), "20s");
    original.setProperty(ClientProperty.SSL_ENABLED.getKey(), "true");
    original.setProperty(ClientProperty.SSL_USE_JSSE.getKey(), "true");
    original.setProperty(ClientProperty.SSL_KEYSTORE_PATH.getKey(), "key_path");
    original.setProperty(ClientProperty.SSL_KEYSTORE_PASSWORD.getKey(), "key_pass");
    original.setProperty(ClientProperty.SSL_KEYSTORE_TYPE.getKey(), "jks");
    original.setProperty(ClientProperty.SSL_TRUSTSTORE_PATH.getKey(), "trust_path");
    original.setProperty(ClientProperty.SSL_TRUSTSTORE_PASSWORD.getKey(), "trust_pass");
    original.setProperty(ClientProperty.SSL_TRUSTSTORE_TYPE.getKey(), "jks");
    original.setProperty(ClientProperty.SASL_ENABLED.getKey(), "true");
    original.setProperty(ClientProperty.SASL_KERBEROS_SERVER_PRIMARY.getKey(), "primary");
    original.setProperty(ClientProperty.SASL_QOP.getKey(), "auth-int");
    Properties roundTripped =
        ClientConfConverter.toProperties(ClientConfConverter.toAccumuloConf(original));
    // restore the dropped props one at a time; only after all three are back do the maps match
    assertNotEquals(original, roundTripped);
    ClientProperty.setPassword(roundTripped, "mypass");
    assertNotEquals(original, roundTripped);
    roundTripped.setProperty(ClientProperty.BATCH_WRITER_THREADS_MAX.getKey(), "5");
    assertNotEquals(original, roundTripped);
    roundTripped.setProperty(ClientProperty.INSTANCE_NAME.getKey(), "instance");
    assertEquals(original, roundTripped);
  }

  // this test ensures a general property can be set and used by a client
  @Test
  public void testGeneralPropsWorkAsClientProperties() {
    Property prop = Property.GENERAL_RPC_TIMEOUT;
    Properties clientProps = new Properties();
    clientProps.setProperty(prop.getKey(), "5s");
    AccumuloConfiguration converted = ClientConfConverter.toAccumuloConf(clientProps);
    // the client-supplied value must override the default...
    assertNotEquals(converted.getTimeInMillis(prop),
        DefaultConfiguration.getInstance().getTimeInMillis(prop));
    // ...and must equal exactly what the client configured
    assertEquals(SECONDS.toMillis(5), converted.getTimeInMillis(prop));
  }
}
| 9,396 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/ScannerBaseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.partialMockBuilder;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.client.ScannerBase;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.junit.jupiter.api.Test;
public class ScannerBaseTest {
  /**
   * Verifies that {@link ScannerBase#forEach} visits exactly the entries produced by
   * {@link ScannerBase#iterator}, relaying each key/value pair to the consumer.
   */
  @Test
  public void testScannerBaseForEach() throws Exception {
    Map<Key,Value> expected =
        Map.of(new Key("row1", "cf1", "cq1"), new Value("v1"), new Key("row2", "cf1", "cq1"),
            new Value("v2"), new Key("row3", "cf1", "cq1"), new Value("v3"));
    // mock ScannerOptions subclass, because EasyMock can't mock ScannerBase, an interface;
    // only the iterator method is mocked, because the forEach method should only call iterator()
    ScannerBase scanner =
        partialMockBuilder(ScannerOptions.class).addMockedMethod("iterator").createMock();
    expect(scanner.iterator()).andReturn(expected.entrySet().iterator()).once();
    replay(scanner);
    // check the results from forEach; they should match what iterator() returns
    Map<Key,Value> actual = new HashMap<>();
    scanner.forEach(actual::put); // method reference instead of the redundant (k, v) lambda
    assertEquals(expected, actual);
    verify(scanner);
  }
}
| 9,397 |
0 | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core | Create_ds/accumulo/core/src/test/java/org/apache/accumulo/core/clientImpl/TableOperationsHelperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Collection;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.function.Consumer;
import java.util.function.Predicate;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.CloneConfiguration;
import org.apache.accumulo.core.client.admin.CompactionConfig;
import org.apache.accumulo.core.client.admin.DiskUsage;
import org.apache.accumulo.core.client.admin.ImportConfiguration;
import org.apache.accumulo.core.client.admin.Locations;
import org.apache.accumulo.core.client.admin.NewTableConfiguration;
import org.apache.accumulo.core.client.admin.SummaryRetriever;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.Test;
public class TableOperationsHelperTest {
// Minimal in-memory TableOperationsHelper: only the per-table property operations
// (set/remove/modify/get) are functional, backed by the `settings` map; all other
// methods are inert stubs or throw UnsupportedOperationException. This lets the
// tests exercise TableOperationsHelper's iterator-management logic in isolation.
static class Tester extends TableOperationsHelper {
  // table name -> (property key -> property value); TreeMap values keep keys sorted
  Map<String,Map<String,String>> settings = new HashMap<>();

  @Override
  public SortedSet<String> list() {
    return null;
  }

  @Override
  public boolean exists(String tableName) {
    // every table "exists" so the helper never rejects a table name
    return true;
  }

  @Override
  public void create(String tableName) {}

  @Override
  public void create(String tableName, NewTableConfiguration ntc) {}

  @Override
  public void addSplits(String tableName, SortedSet<Text> partitionKeys) {}

  @Override
  public Collection<Text> listSplits(String tableName) {
    return null;
  }

  @Override
  public Collection<Text> listSplits(String tableName, int maxSplits) {
    return null;
  }

  @Override
  public Text getMaxRow(String tableName, Authorizations auths, Text startRow,
      boolean startInclusive, Text endRow, boolean endInclusive) {
    return null;
  }

  @Override
  public void merge(String tableName, Text start, Text end) {
  }

  @Override
  public void deleteRows(String tableName, Text start, Text end) {}

  @Override
  public void compact(String tableName, Text start, Text end, boolean flush, boolean wait) {}

  @Override
  public void compact(String tableName, Text start, Text end, List<IteratorSetting> iterators,
      boolean flush, boolean wait) {}

  @Override
  public void compact(String tableName, CompactionConfig config) {}

  @Override
  public void delete(String tableName) {}

  @Override
  public void clone(String srcTableName, String newTableName, boolean flush,
      Map<String,String> propertiesToSet, Set<String> propertiesToExclude) {}

  @Override
  public void clone(String srcTableName, String newTableName, CloneConfiguration config) {}

  @Override
  public void rename(String oldTableName, String newTableName) {}

  @Override
  public void flush(String tableName) {}

  @Override
  public void flush(String tableName, Text start, Text end, boolean wait) {}

  // --- functional property storage, the part the tests actually verify ---

  @Override
  public void setProperty(String tableName, String property, String value) {
    settings.computeIfAbsent(tableName, k -> new TreeMap<>());
    settings.get(tableName).put(property, value);
  }

  @Override
  public Map<String,String> modifyProperties(String tableName,
      Consumer<Map<String,String>> mapMutator)
      throws IllegalArgumentException, ConcurrentModificationException {
    settings.computeIfAbsent(tableName, k -> new TreeMap<>());
    var map = settings.get(tableName);
    mapMutator.accept(map);
    // return an immutable snapshot, not the live map
    return Map.copyOf(map);
  }

  @Override
  public void removeProperty(String tableName, String property) {
    if (!settings.containsKey(tableName)) {
      return;
    }
    settings.get(tableName).remove(property);
  }

  @Override
  public Map<String,String> getConfiguration(String tableName) {
    // NOTE: returns the live mutable map when the table has settings
    Map<String,String> empty = Collections.emptyMap();
    if (!settings.containsKey(tableName)) {
      return empty;
    }
    return settings.get(tableName);
  }

  @Override
  public Map<String,String> getTableProperties(String tableName)
      throws AccumuloException, TableNotFoundException {
    Map<String,String> empty = Collections.emptyMap();
    if (!settings.containsKey(tableName)) {
      return empty;
    }
    return settings.get(tableName);
  }

  // --- remaining stubs ---

  @Override
  public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) {}

  @Override
  public Map<String,Set<Text>> getLocalityGroups(String tableName) {
    return null;
  }

  @Override
  public Set<Range> splitRangeByTablets(String tableName, Range range, int maxSplits) {
    return null;
  }

  @Override
  public void offline(String tableName) {}

  @Override
  public boolean isOnline(String tableName) {
    return true;
  }

  @Override
  public void online(String tableName) {}

  @Override
  public void offline(String tableName, boolean wait) {
  }

  @Override
  public void online(String tableName, boolean wait) {}

  @Override
  public void clearLocatorCache(String tableName) {}

  @Override
  public Map<String,String> tableIdMap() {
    return null;
  }

  @Override
  public List<DiskUsage> getDiskUsage(Set<String> tables) {
    return null;
  }

  @Override
  public void importTable(String tableName, Set<String> exportDir, ImportConfiguration ic) {}

  @Override
  public void exportTable(String tableName, String exportDir) {}

  @Override
  public void cancelCompaction(String tableName) {}

  @Override
  public boolean testClassLoad(String tableName, String className, String asTypeName) {
    return false;
  }

  @Override
  public void setSamplerConfiguration(String tableName,
      SamplerConfiguration samplerConfiguration) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void clearSamplerConfiguration(String tableName) {
    throw new UnsupportedOperationException();
  }

  @Override
  public SamplerConfiguration getSamplerConfiguration(String tableName) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Locations locate(String tableName, Collection<Range> ranges) {
    throw new UnsupportedOperationException();
  }

  @Override
  public SummaryRetriever summaries(String tableName) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void addSummarizers(String tableName, SummarizerConfiguration... summarizerConf) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void removeSummarizers(String tableName, Predicate<SummarizerConfiguration> predicate) {
    throw new UnsupportedOperationException();
  }

  @Override
  public List<SummarizerConfiguration> listSummarizers(String tableName) {
    throw new UnsupportedOperationException();
  }
}
// Factory hook: subclasses may override to exercise a different TableOperationsHelper.
protected TableOperationsHelper getHelper() {
  return new Tester();
}
/** Asserts the table's stored configuration equals the given {@code "key=value"} pairs. */
void check(TableOperationsHelper t, String tablename, String[] values) throws Exception {
  Map<String,String> expected = new TreeMap<>();
  for (String pair : values) {
    // split on the first '=' only, so values may themselves contain '='
    String[] kv = pair.split("=", 2);
    expected.put(kv[0], kv[1]);
  }
  assertEquals(expected, Map.copyOf(t.getConfiguration(tablename)));
}
  @Test
  public void testAttachIterator() throws Exception {
    TableOperationsHelper t = getHelper();
    Map<String,String> empty = Collections.emptyMap();
    // Attach a scan-scope iterator, verify the generated property, then remove it.
    t.attachIterator("table", new IteratorSetting(10, "someName", "foo.bar", empty),
        EnumSet.of(IteratorScope.scan));
    check(t, "table", new String[] {"table.iterator.scan.someName=10,foo.bar",});
    t.removeIterator("table", "someName", EnumSet.of(IteratorScope.scan));
    check(t, "table", new String[] {});
    // Same name in two scopes: options set for majc must not leak into scan.
    IteratorSetting setting = new IteratorSetting(10, "someName", "foo.bar");
    setting.addOptions(Collections.singletonMap("key", "value"));
    t.attachIterator("table", setting, EnumSet.of(IteratorScope.majc));
    setting = new IteratorSetting(10, "someName", "foo.bar");
    t.attachIterator("table", setting, EnumSet.of(IteratorScope.scan));
    check(t, "table", new String[] {"table.iterator.majc.someName=10,foo.bar",
        "table.iterator.majc.someName.opt.key=value", "table.iterator.scan.someName=10,foo.bar",});
    t.removeIterator("table", "someName", EnumSet.of(IteratorScope.scan));
    // Add a second iterator and verify listIterators reports both names/scopes.
    setting = new IteratorSetting(20, "otherName", "some.classname");
    setting.addOptions(Collections.singletonMap("key", "value"));
    t.attachIterator("table", setting, EnumSet.of(IteratorScope.majc));
    setting = new IteratorSetting(20, "otherName", "some.classname");
    t.attachIterator("table", setting, EnumSet.of(IteratorScope.scan));
    Map<String,EnumSet<IteratorScope>> two = t.listIterators("table");
    assertEquals(2, two.size());
    assertTrue(two.containsKey("otherName"));
    assertEquals(2, two.get("otherName").size());
    assertTrue(two.get("otherName").contains(IteratorScope.majc));
    assertTrue(two.get("otherName").contains(IteratorScope.scan));
    assertTrue(two.containsKey("someName"));
    assertEquals(1, two.get("someName").size());
    assertTrue(two.get("someName").contains(IteratorScope.majc));
    // Removing across all scopes should leave only the "otherName" entries.
    t.removeIterator("table", "someName", EnumSet.allOf(IteratorScope.class));
    check(t, "table",
        new String[] {"table.iterator.majc.otherName=20,some.classname",
            "table.iterator.majc.otherName.opt.key=value",
            "table.iterator.scan.otherName=20,some.classname",});
    // getIteratorSetting must return scope-specific options (scan has none, majc has one).
    setting = t.getIteratorSetting("table", "otherName", IteratorScope.scan);
    assertEquals(20, setting.getPriority());
    assertEquals("some.classname", setting.getIteratorClass());
    assertTrue(setting.getOptions().isEmpty());
    final IteratorSetting setting1 = t.getIteratorSetting("table", "otherName", IteratorScope.majc);
    assertEquals(20, setting1.getPriority());
    assertEquals("some.classname", setting1.getIteratorClass());
    assertFalse(setting1.getOptions().isEmpty());
    assertEquals(Collections.singletonMap("key", "value"), setting1.getOptions());
    t.attachIterator("table", setting1, EnumSet.of(IteratorScope.minc));
    check(t, "table",
        new String[] {"table.iterator.majc.otherName=20,some.classname",
            "table.iterator.majc.otherName.opt.key=value",
            "table.iterator.minc.otherName=20,some.classname",
            "table.iterator.minc.otherName.opt.key=value",
            "table.iterator.scan.otherName=20,some.classname",});
    // Conflicts must be rejected: duplicate name, duplicate priority, stale option property.
    assertThrows(AccumuloException.class, () -> t.attachIterator("table", setting1));
    setting1.setName("thirdName");
    assertThrows(AccumuloException.class, () -> t.attachIterator("table", setting1));
    setting1.setPriority(10);
    t.setProperty("table", "table.iterator.minc.thirdName.opt.key", "value");
    assertThrows(AccumuloException.class, () -> t.attachIterator("table", setting1));
    t.removeProperty("table", "table.iterator.minc.thirdName.opt.key");
    t.attachIterator("table", setting1);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.clientImpl;
import static org.easymock.EasyMock.replay;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletLocation;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletLocations;
import org.apache.accumulo.core.clientImpl.TabletLocator.TabletServerMutations;
import org.apache.accumulo.core.clientImpl.TabletLocatorImpl.TabletLocationObtainer;
import org.apache.accumulo.core.clientImpl.TabletLocatorImpl.TabletServerLockChecker;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataLocationObtainer;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
import org.apache.hadoop.io.Text;
import org.easymock.EasyMock;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class TabletLocatorImplTest {
  // Fixed top of the simulated metadata hierarchy: the root tablet's extent,
  // and a single metadata extent covering everything after it.
  private static final KeyExtent ROOT_TABLE_EXTENT = RootTable.EXTENT;
  private static final KeyExtent METADATA_TABLE_EXTENT =
      new KeyExtent(MetadataTable.ID, null, ROOT_TABLE_EXTENT.endRow());
static KeyExtent createNewKeyExtent(String table, String endRow, String prevEndRow) {
return new KeyExtent(TableId.of(table), endRow == null ? null : new Text(endRow),
prevEndRow == null ? null : new Text(prevEndRow));
}
static Range createNewRange(String key1, boolean startInclusive, String key2,
boolean endInclusive) {
return new Range(key1 == null ? null : new Text(key1), startInclusive,
key2 == null ? null : new Text(key2), endInclusive);
}
static Range createNewRange(String key1, String key2) {
return new Range(key1 == null ? null : new Text(key1), key2 == null ? null : new Text(key2));
}
  /** Wraps the given ranges in an immutable list. */
  static List<Range> createNewRangeList(Range... ranges) {
    return List.of(ranges);
  }
static class RangeLocation {
String location;
Map<KeyExtent,List<Range>> extents = new HashMap<>();
public RangeLocation(String location, KeyExtent extent1, List<Range> range1) {
this.location = location;
this.extents.put(extent1, range1);
}
public RangeLocation(String location, KeyExtent extent1, List<Range> range1, KeyExtent extent2,
List<Range> range2) {
this.location = location;
this.extents.put(extent1, range1);
this.extents.put(extent2, range2);
}
}
  /** Convenience factory for a single-extent {@link RangeLocation}. */
  static RangeLocation createRangeLocation(String location, KeyExtent extent, List<Range> ranges) {
    return new RangeLocation(location, extent, ranges);
  }
  /** Convenience factory for a two-extent {@link RangeLocation}. */
  static RangeLocation createRangeLocation(String location, KeyExtent extent1, List<Range> range1,
      KeyExtent extent2, List<Range> range2) {
    return new RangeLocation(location, extent1, range1, extent2, range2);
  }
static Map<String,Map<KeyExtent,List<Range>>>
createExpectedBinnings(RangeLocation... rangeLocations) {
Map<String,Map<KeyExtent,List<Range>>> expBinnedRanges = new HashMap<>();
for (RangeLocation rl : rangeLocations) {
Map<KeyExtent,List<Range>> binnedKE =
expBinnedRanges.computeIfAbsent(rl.location, k -> new HashMap<>());
expBinnedRanges.put(rl.location, binnedKE);
binnedKE.putAll(rl.extents);
}
return expBinnedRanges;
}
  /**
   * Builds an extent-keyed cache from a flat list of (KeyExtent, location) pairs; every
   * entry gets the fixed session id "1".
   */
  static TreeMap<KeyExtent,TabletLocation> createMetaCacheKE(Object... data) {
    TreeMap<KeyExtent,TabletLocation> mcke = new TreeMap<>();
    for (int i = 0; i < data.length; i += 2) {
      KeyExtent ke = (KeyExtent) data[i];
      String loc = (String) data[i + 1];
      mcke.put(ke, new TabletLocation(ke, loc, "1"));
    }
    return mcke;
  }
static TreeMap<Text,TabletLocation> createMetaCache(Object... data) {
TreeMap<KeyExtent,TabletLocation> mcke = createMetaCacheKE(data);
TreeMap<Text,TabletLocation> mc = new TreeMap<>(TabletLocatorImpl.END_ROW_COMPARATOR);
for (Entry<KeyExtent,TabletLocation> entry : mcke.entrySet()) {
if (entry.getKey().endRow() == null) {
mc.put(TabletLocatorImpl.MAX_TEXT, entry.getValue());
} else {
mc.put(entry.getKey().endRow(), entry.getValue());
}
}
return mc;
}
  /**
   * Wires up a three-level locator chain (user table -> metadata -> root) backed by the
   * in-memory {@code tservers} map, then registers each (KeyExtent, location) pair from
   * {@code data} in the simulated metadata tablet.
   */
  static TabletLocatorImpl createLocators(TServers tservers, String rootTabLoc, String metaTabLoc,
      String table, TabletServerLockChecker tslc, Object... data) {
    TreeMap<KeyExtent,TabletLocation> mcke = createMetaCacheKE(data);
    TestTabletLocationObtainer ttlo = new TestTabletLocationObtainer(tservers);
    RootTabletLocator rtl = new TestRootTabletLocator();
    TabletLocatorImpl rootTabletCache =
        new TabletLocatorImpl(MetadataTable.ID, rtl, ttlo, new YesLockChecker());
    TabletLocatorImpl tab1TabletCache =
        new TabletLocatorImpl(TableId.of(table), rootTabletCache, ttlo, tslc);
    // Root points at the metadata tablet; metadata points at every user tablet.
    setLocation(tservers, rootTabLoc, ROOT_TABLE_EXTENT, METADATA_TABLE_EXTENT, metaTabLoc);
    for (Entry<KeyExtent,TabletLocation> entry : mcke.entrySet()) {
      setLocation(tservers, metaTabLoc, METADATA_TABLE_EXTENT, entry.getKey(),
          entry.getValue().getTserverLocation());
    }
    return tab1TabletCache;
  }
  /** Overload that always treats tserver locks as held. */
  static TabletLocatorImpl createLocators(TServers tservers, String rootTabLoc, String metaTabLoc,
      String table, Object... data) {
    return createLocators(tservers, rootTabLoc, metaTabLoc, table, new YesLockChecker(), data);
  }
  /** Overload using a fresh server map with fixed root/metadata server names. */
  static TabletLocatorImpl createLocators(String table, Object... data) {
    TServers tservers = new TServers();
    return createLocators(tservers, "tserver1", "tserver2", table, data);
  }
  // Mocked client context (configured in setUp) and the fake instance id it reports.
  private ClientContext context;
  private InstanceId iid;
  @BeforeEach
  public void setUp() {
    // Mock just enough of ClientContext for the locators: a root tablet
    // location and an instance id, each callable any number of times.
    context = EasyMock.createMock(ClientContext.class);
    iid = InstanceId.of("instance1");
    EasyMock.expect(context.getRootTabletLocation()).andReturn("tserver1").anyTimes();
    EasyMock.expect(context.getInstanceID()).andReturn(iid).anyTimes();
    replay(context);
  }
  /** Overload of the binning test that expects no failed ranges. */
  private void runTest(List<Range> ranges, TabletLocatorImpl tab1TabletCache,
      Map<String,Map<KeyExtent,List<Range>>> expected) throws Exception {
    List<Range> failures = Collections.emptyList();
    runTest(ranges, tab1TabletCache, expected, failures);
  }
private void runTest(List<Range> ranges, TabletLocatorImpl tab1TabletCache,
Map<String,Map<KeyExtent,List<Range>>> expected, List<Range> efailures) throws Exception {
Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
List<Range> f = tab1TabletCache.binRanges(context, ranges, binnedRanges);
assertEquals(expected, binnedRanges);
HashSet<Range> f1 = new HashSet<>(f);
HashSet<Range> f2 = new HashSet<>(efailures);
assertEquals(f2, f1);
}
static Set<KeyExtent> createNewKeyExtentSet(KeyExtent... extents) {
HashSet<KeyExtent> keyExtentSet = new HashSet<>();
Collections.addAll(keyExtentSet, extents);
return keyExtentSet;
}
  /**
   * Applies removeOverlapping to a copy of the cache and asserts exactly which extents
   * survive the removal.
   */
  static void runTest(TreeMap<Text,TabletLocation> metaCache, KeyExtent remove,
      Set<KeyExtent> expected) {
    // copy so same metaCache can be used for multiple test
    metaCache = new TreeMap<>(metaCache);
    TabletLocatorImpl.removeOverlapping(metaCache, remove);
    HashSet<KeyExtent> eic = new HashSet<>();
    for (TabletLocation tl : metaCache.values()) {
      eic.add(tl.getExtent());
    }
    assertEquals(expected, eic);
  }
static Mutation createNewMutation(String row, String... data) {
Mutation mut = new Mutation(new Text(row));
for (String element : data) {
String[] cvp = element.split("=");
String[] cols = cvp[0].split(":");
mut.put(cols[0], cols[1], cvp[1]);
}
return mut;
}
  /** Wraps the given mutations in an immutable list. */
  static List<Mutation> createNewMutationList(Mutation... ma) {
    return List.of(ma);
  }
  /**
   * Bins {@code ml} through the locator, verifies the binning against {@code emb}, and
   * asserts the failed mutations' rows match {@code efailures} (order-insensitive).
   */
  private void runTest(TabletLocatorImpl metaCache, List<Mutation> ml,
      Map<String,Map<KeyExtent,List<String>>> emb, String... efailures) throws Exception {
    Map<String,TabletServerMutations<Mutation>> binnedMutations = new HashMap<>();
    List<Mutation> afailures = new ArrayList<>();
    metaCache.binMutations(context, ml, binnedMutations, afailures);
    verify(emb, binnedMutations);
    ArrayList<String> afs = new ArrayList<>();
    ArrayList<String> efs = new ArrayList<>(List.of(efailures));
    for (Mutation mutation : afailures) {
      afs.add(new String(mutation.getRow()));
    }
    // Failure order is unspecified, so compare sorted row lists.
    Collections.sort(afs);
    Collections.sort(efs);
    assertEquals(efs, afs);
  }
  /**
   * Asserts the binned mutations match the expected server -> extent -> rows structure;
   * row order within an extent is ignored.
   */
  private void verify(Map<String,Map<KeyExtent,List<String>>> expected,
      Map<String,TabletServerMutations<Mutation>> actual) {
    assertEquals(expected.keySet(), actual.keySet());
    for (String server : actual.keySet()) {
      TabletServerMutations<Mutation> atb = actual.get(server);
      Map<KeyExtent,List<String>> etb = expected.get(server);
      assertEquals(etb.keySet(), atb.getMutations().keySet());
      for (KeyExtent ke : etb.keySet()) {
        ArrayList<String> eRows = new ArrayList<>(etb.get(ke));
        ArrayList<String> aRows = new ArrayList<>();
        for (Mutation m : atb.getMutations().get(ke)) {
          aRows.add(new String(m.getRow()));
        }
        // Compare sorted copies so mutation ordering does not matter.
        Collections.sort(eRows);
        Collections.sort(aRows);
        assertEquals(eRows, aRows);
      }
    }
  }
  /** Simple triple of (tserver location, mutation row, tablet extent) for expected binnings. */
  static class ServerExtent {
    public String location;
    public String row;
    public KeyExtent extent;
    public ServerExtent(String location, String row, KeyExtent extent) {
      this.location = location;
      this.row = row;
      this.extent = extent;
    }
  }
  // NOTE: parameter order (row, location) is deliberately the reverse of the
  // ServerExtent constructor's (location, row).
  static ServerExtent createServerExtent(String row, String location, KeyExtent extent) {
    return new ServerExtent(location, row, extent);
  }
  /** Groups the given triples into the server -> extent -> rows structure used by verify(). */
  static Map<String,Map<KeyExtent,List<String>>> createServerExtentMap(ServerExtent... locations) {
    Map<String,Map<KeyExtent,List<String>>> serverExtents = new HashMap<>();
    for (ServerExtent se : locations) {
      serverExtents.computeIfAbsent(se.location, k -> new HashMap<>())
          .computeIfAbsent(se.extent, k -> new ArrayList<>()).add(se.row);
    }
    return serverExtents;
  }
  @Test
  public void testRemoveOverlapping1() {
    // Single tablet covering the whole table: any overlapping removal clears it.
    TreeMap<Text,TabletLocation> mc = createMetaCache(createNewKeyExtent("0", null, null), "l1");
    runTest(mc, createNewKeyExtent("0", "a", null), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", null, null), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", null, "a"), createNewKeyExtentSet());
    // Three tablets (-inf,g], (g,r], (r,+inf): removals should drop exactly
    // the tablets the removed extent overlaps.
    mc = createMetaCache(createNewKeyExtent("0", "g", null), "l1",
        createNewKeyExtent("0", "r", "g"), "l1", createNewKeyExtent("0", null, "r"), "l1");
    runTest(mc, createNewKeyExtent("0", null, null), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "a", null), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "g", null), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "h", null),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "r", null),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", null), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "b", "a"), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "g", "a"), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "h", "a"),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "r", "a"),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", "a"), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "h", "g"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "r", "g"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "i", "h"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "r", "h"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", "h"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "z", "f"), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "z", "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "z", "q"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "z", "r"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
    runTest(mc, createNewKeyExtent("0", "z", "s"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
    runTest(mc, createNewKeyExtent("0", null, "f"), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", null, "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", null, "q"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", null, "r"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
    runTest(mc, createNewKeyExtent("0", null, "s"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
  }
  @Test
  public void testRemoveOverlapping2() {
    // test removes when cache does not contain all tablets in a table
    // Cache holds only (g,r] and (r,+inf); the (-inf,g] tablet is missing.
    TreeMap<Text,TabletLocation> mc = createMetaCache(createNewKeyExtent("0", "r", "g"), "l1",
        createNewKeyExtent("0", null, "r"), "l1");
    runTest(mc, createNewKeyExtent("0", "a", null), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "g", null), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "h", null),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "r", null),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", null), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "b", "a"), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "g", "a"), createNewKeyExtentSet(
        createNewKeyExtent("0", "r", "g"), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "h", "a"),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "r", "a"),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", "a"), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "h", "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    // Cache missing the middle tablet (g,r].
    mc = createMetaCache(createNewKeyExtent("0", "g", null), "l1",
        createNewKeyExtent("0", null, "r"), "l1");
    runTest(mc, createNewKeyExtent("0", "h", "g"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "h", "a"),
        createNewKeyExtentSet(createNewKeyExtent("0", null, "r")));
    runTest(mc, createNewKeyExtent("0", "s", "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "s", "a"), createNewKeyExtentSet());
    // Cache missing the last tablet (r,+inf).
    mc = createMetaCache(createNewKeyExtent("0", "g", null), "l1",
        createNewKeyExtent("0", "r", "g"), "l1");
    runTest(mc, createNewKeyExtent("0", "z", "f"), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", "z", "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "z", "q"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", "z", "r"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
    runTest(mc, createNewKeyExtent("0", "z", "s"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
    runTest(mc, createNewKeyExtent("0", null, "f"), createNewKeyExtentSet());
    runTest(mc, createNewKeyExtent("0", null, "g"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", null, "q"),
        createNewKeyExtentSet(createNewKeyExtent("0", "g", null)));
    runTest(mc, createNewKeyExtent("0", null, "r"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
    runTest(mc, createNewKeyExtent("0", null, "s"), createNewKeyExtentSet(
        createNewKeyExtent("0", "g", null), createNewKeyExtent("0", "r", "g")));
  }
  // Simulated cluster state: server name -> (hosted metadata tablet -> its sorted entries).
  static class TServers {
    private final Map<String,Map<KeyExtent,SortedMap<Key,Value>>> tservers = new HashMap<>();
  }
  // Obtainer that reads tablet locations from the in-memory TServers map
  // instead of scanning real metadata tablets.
  static class TestTabletLocationObtainer implements TabletLocationObtainer {
    private final Map<String,Map<KeyExtent,SortedMap<Key,Value>>> tservers;
    TestTabletLocationObtainer(TServers tservers) {
      this.tservers = tservers.tservers;
    }
    @Override
    public TabletLocations lookupTablet(ClientContext context, TabletLocation src, Text row,
        Text stopRow, TabletLocator parent) {
      // Unknown server: behave like a down tserver and invalidate it in the parent.
      Map<KeyExtent,SortedMap<Key,Value>> tablets = tservers.get(src.getTserverLocation());
      if (tablets == null) {
        parent.invalidateCache(context, src.getTserverLocation());
        return null;
      }
      // Server no longer hosts the tablet: invalidate just that extent.
      SortedMap<Key,Value> tabletData = tablets.get(src.getExtent());
      if (tabletData == null) {
        parent.invalidateCache(src.getExtent());
        return null;
      }
      // the following clip is done on a tablet, do it here to see if it throws exceptions
      src.getExtent().toDataRange().clip(new Range(row, true, stopRow, true));
      // Slice the tablet's entries to [row, stopRow] and decode the locations.
      Key startKey = new Key(row);
      Key stopKey = new Key(stopRow).followingKey(PartialKey.ROW);
      SortedMap<Key,Value> results = tabletData.tailMap(startKey).headMap(stopKey);
      return MetadataLocationObtainer.getMetadataLocationEntries(results);
    }
    @Override
    public List<TabletLocation> lookupTablets(ClientContext context, String tserver,
        Map<KeyExtent,List<Range>> map, TabletLocator parent) {
      ArrayList<TabletLocation> list = new ArrayList<>();
      Map<KeyExtent,SortedMap<Key,Value>> tablets = tservers.get(tserver);
      if (tablets == null) {
        parent.invalidateCache(context, tserver);
        return list;
      }
      TreeMap<Key,Value> results = new TreeMap<>();
      Set<Entry<KeyExtent,List<Range>>> es = map.entrySet();
      List<KeyExtent> failures = new ArrayList<>();
      for (Entry<KeyExtent,List<Range>> entry : es) {
        SortedMap<Key,Value> tabletData = tablets.get(entry.getKey());
        if (tabletData == null) {
          // Tablet not hosted here anymore; collect it for invalidation below.
          failures.add(entry.getKey());
          continue;
        }
        List<Range> ranges = entry.getValue();
        for (Range range : ranges) {
          SortedMap<Key,Value> tm;
          if (range.getStartKey() == null) {
            tm = tabletData;
          } else {
            tm = tabletData.tailMap(range.getStartKey());
          }
          // Scan forward, stopping once keys pass the end of the range.
          for (Entry<Key,Value> de : tm.entrySet()) {
            if (range.afterEndKey(de.getKey())) {
              break;
            }
            if (range.contains(de.getKey())) {
              results.put(de.getKey(), de.getValue());
            }
          }
        }
      }
      if (!failures.isEmpty()) {
        parent.invalidateCache(failures);
      }
      return MetadataLocationObtainer.getMetadataLocationEntries(results).getLocations();
    }
  }
  // Lock checker that treats every tserver lock as held and never invalidates anything.
  static class YesLockChecker implements TabletServerLockChecker {
    @Override
    public boolean isLockHeld(String tserver, String session) {
      return true;
    }
    @Override
    public void invalidateCache(String server) {}
  }
  // Root locator that returns the mocked context's root tablet location
  // directly instead of consulting ZooKeeper.
  static class TestRootTabletLocator extends RootTabletLocator {
    TestRootTabletLocator() {
      super(new YesLockChecker());
    }
    @Override
    protected TabletLocation getRootTabletLocation(ClientContext context) {
      return new TabletLocation(RootTable.EXTENT, context.getRootTabletLocation(), "1");
    }
    @Override
    public void invalidateCache(ClientContext context, String server) {}
  }
static void createEmptyTablet(TServers tservers, String server, KeyExtent tablet) {
Map<KeyExtent,SortedMap<Key,Value>> tablets =
tservers.tservers.computeIfAbsent(server, k -> new HashMap<>());
SortedMap<Key,Value> tabletData = tablets.computeIfAbsent(tablet, k -> new TreeMap<>());
if (!tabletData.isEmpty()) {
throw new IllegalStateException("Asked for empty tablet, but non empty tablet exists");
}
}
  /**
   * Removes the location entry for extent {@code ke} from the metadata tablet hosted on
   * the given server; silently does nothing if the server or tablet is unknown.
   */
  static void clearLocation(TServers tservers, String server, KeyExtent tablet, KeyExtent ke,
      String instance) {
    Map<KeyExtent,SortedMap<Key,Value>> tablets = tservers.tservers.get(server);
    if (tablets == null) {
      return;
    }
    SortedMap<Key,Value> tabletData = tablets.get(tablet);
    if (tabletData == null) {
      return;
    }
    Text mr = ke.toMetaRow();
    Key lk = new Key(mr, CurrentLocationColumnFamily.NAME, new Text(instance));
    tabletData.remove(lk);
  }
  /**
   * Writes the metadata entries (location plus prev-row column) for extent {@code ke}
   * into the simulated metadata tablet hosted on the given server.
   */
  static void setLocation(TServers tservers, String server, KeyExtent tablet, KeyExtent ke,
      String location, String instance) {
    Map<KeyExtent,SortedMap<Key,Value>> tablets =
        tservers.tservers.computeIfAbsent(server, k -> new HashMap<>());
    SortedMap<Key,Value> tabletData = tablets.computeIfAbsent(tablet, k -> new TreeMap<>());
    Text mr = ke.toMetaRow();
    Value per = TabletColumnFamily.encodePrevEndRow(ke.prevEndRow());
    if (location != null) {
      if (instance == null) {
        instance = "";
      }
      Key lk = new Key(mr, CurrentLocationColumnFamily.NAME, new Text(instance));
      tabletData.put(lk, new Value(location));
    }
    // The prev-row column is always written, even for an unassigned tablet.
    Key pk = new Key(mr, TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
        TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier());
    tabletData.put(pk, per);
  }
  /** Overload using an empty session/instance string. */
  static void setLocation(TServers tservers, String server, KeyExtent tablet, KeyExtent ke,
      String location) {
    setLocation(tservers, server, tablet, ke, location, "");
  }
  /** Simulates a tserver going away by dropping it and everything it hosted. */
  static void deleteServer(TServers tservers, String server) {
    tservers.tservers.remove(server);
  }
  /**
   * Locates {@code row} through the cache and asserts either a miss (expected == null)
   * or the expected extent and hosting server.
   */
  private void locateTabletTest(TabletLocatorImpl cache, String row, boolean skipRow,
      KeyExtent expected, String server) throws Exception {
    TabletLocation tl = cache.locateTablet(context, new Text(row), skipRow, false);
    if (expected == null) {
      // Expecting a miss; dump any unexpected hit to aid debugging before failing.
      if (tl != null) {
        System.out.println("tl = " + tl);
      }
      assertNull(tl);
    } else {
      assertNotNull(tl);
      assertEquals(server, tl.getTserverLocation());
      assertEquals(expected, tl.getExtent());
    }
  }
  /** Overload that does not skip the row. */
  private void locateTabletTest(TabletLocatorImpl cache, String row, KeyExtent expected,
      String server) throws Exception {
    locateTabletTest(cache, row, false, expected, server);
  }
@Test
public void test1() throws Exception {
TServers tservers = new TServers();
TestTabletLocationObtainer ttlo = new TestTabletLocationObtainer(tservers);
RootTabletLocator rtl = new TestRootTabletLocator();
TabletLocatorImpl rootTabletCache =
new TabletLocatorImpl(MetadataTable.ID, rtl, ttlo, new YesLockChecker());
TabletLocatorImpl tab1TabletCache =
new TabletLocatorImpl(TableId.of("tab1"), rootTabletCache, ttlo, new YesLockChecker());
locateTabletTest(tab1TabletCache, "r1", null, null);
KeyExtent tab1e = createNewKeyExtent("tab1", null, null);
setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, METADATA_TABLE_EXTENT, "tserver2");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e, "tserver3");
locateTabletTest(tab1TabletCache, "r1", tab1e, "tserver3");
locateTabletTest(tab1TabletCache, "r2", tab1e, "tserver3");
// simulate a split
KeyExtent tab1e1 = createNewKeyExtent("tab1", "g", null);
KeyExtent tab1e2 = createNewKeyExtent("tab1", null, "g");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e1, "tserver4");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e2, "tserver5");
locateTabletTest(tab1TabletCache, "r1", tab1e, "tserver3");
tab1TabletCache.invalidateCache(tab1e);
locateTabletTest(tab1TabletCache, "r1", tab1e2, "tserver5");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver4");
locateTabletTest(tab1TabletCache, "a", true, tab1e1, "tserver4");
locateTabletTest(tab1TabletCache, "g", tab1e1, "tserver4");
locateTabletTest(tab1TabletCache, "g", true, tab1e2, "tserver5");
// simulate a partial split
KeyExtent tab1e22 = createNewKeyExtent("tab1", null, "m");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e22, "tserver6");
locateTabletTest(tab1TabletCache, "r1", tab1e2, "tserver5");
tab1TabletCache.invalidateCache(tab1e2);
locateTabletTest(tab1TabletCache, "r1", tab1e22, "tserver6");
locateTabletTest(tab1TabletCache, "h", null, null);
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver4");
KeyExtent tab1e21 = createNewKeyExtent("tab1", "m", "g");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e21, "tserver7");
locateTabletTest(tab1TabletCache, "r1", tab1e22, "tserver6");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver7");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver4");
// simulate a migration
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e21, "tserver8");
tab1TabletCache.invalidateCache(tab1e21);
locateTabletTest(tab1TabletCache, "r1", tab1e22, "tserver6");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver4");
// simulate a server failure
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, tab1e21, "tserver9");
tab1TabletCache.invalidateCache(context, "tserver8");
locateTabletTest(tab1TabletCache, "r1", tab1e22, "tserver6");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver9");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver4");
// simulate all servers failing
deleteServer(tservers, "tserver1");
deleteServer(tservers, "tserver2");
tab1TabletCache.invalidateCache(context, "tserver4");
tab1TabletCache.invalidateCache(context, "tserver6");
tab1TabletCache.invalidateCache(context, "tserver9");
locateTabletTest(tab1TabletCache, "r1", null, null);
locateTabletTest(tab1TabletCache, "h", null, null);
locateTabletTest(tab1TabletCache, "a", null, null);
EasyMock.verify(context);
context = EasyMock.createMock(ClientContext.class);
EasyMock.expect(context.getInstanceID()).andReturn(iid).anyTimes();
EasyMock.expect(context.getRootTabletLocation()).andReturn("tserver4").anyTimes();
replay(context);
setLocation(tservers, "tserver4", ROOT_TABLE_EXTENT, METADATA_TABLE_EXTENT, "tserver5");
setLocation(tservers, "tserver5", METADATA_TABLE_EXTENT, tab1e1, "tserver1");
setLocation(tservers, "tserver5", METADATA_TABLE_EXTENT, tab1e21, "tserver2");
setLocation(tservers, "tserver5", METADATA_TABLE_EXTENT, tab1e22, "tserver3");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver1");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver2");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver3");
// simulate the metadata table splitting
KeyExtent mte1 =
new KeyExtent(MetadataTable.ID, tab1e21.toMetaRow(), ROOT_TABLE_EXTENT.endRow());
KeyExtent mte2 = new KeyExtent(MetadataTable.ID, null, tab1e21.toMetaRow());
setLocation(tservers, "tserver4", ROOT_TABLE_EXTENT, mte1, "tserver5");
setLocation(tservers, "tserver4", ROOT_TABLE_EXTENT, mte2, "tserver6");
deleteServer(tservers, "tserver5");
setLocation(tservers, "tserver5", mte1, tab1e1, "tserver7");
setLocation(tservers, "tserver5", mte1, tab1e21, "tserver8");
setLocation(tservers, "tserver6", mte2, tab1e22, "tserver9");
tab1TabletCache.invalidateCache(tab1e1);
tab1TabletCache.invalidateCache(tab1e21);
tab1TabletCache.invalidateCache(tab1e22);
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver7");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
// simulate metadata and regular server down and the reassigned
deleteServer(tservers, "tserver5");
tab1TabletCache.invalidateCache(context, "tserver7");
locateTabletTest(tab1TabletCache, "a", null, null);
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
setLocation(tservers, "tserver4", ROOT_TABLE_EXTENT, mte1, "tserver10");
setLocation(tservers, "tserver10", mte1, tab1e1, "tserver7");
setLocation(tservers, "tserver10", mte1, tab1e21, "tserver8");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver7");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
tab1TabletCache.invalidateCache(context, "tserver7");
setLocation(tservers, "tserver10", mte1, tab1e1, "tserver2");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver2");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver8");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
// simulate a hole in the metadata, caused by a partial split
KeyExtent mte11 =
new KeyExtent(MetadataTable.ID, tab1e1.toMetaRow(), ROOT_TABLE_EXTENT.endRow());
KeyExtent mte12 = new KeyExtent(MetadataTable.ID, tab1e21.toMetaRow(), tab1e1.toMetaRow());
deleteServer(tservers, "tserver10");
setLocation(tservers, "tserver4", ROOT_TABLE_EXTENT, mte12, "tserver10");
setLocation(tservers, "tserver10", mte12, tab1e21, "tserver12");
// at this point should be no table1 metadata
tab1TabletCache.invalidateCache(tab1e1);
tab1TabletCache.invalidateCache(tab1e21);
locateTabletTest(tab1TabletCache, "a", null, null);
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver12");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
setLocation(tservers, "tserver4", ROOT_TABLE_EXTENT, mte11, "tserver5");
setLocation(tservers, "tserver5", mte11, tab1e1, "tserver13");
locateTabletTest(tab1TabletCache, "a", tab1e1, "tserver13");
locateTabletTest(tab1TabletCache, "h", tab1e21, "tserver12");
locateTabletTest(tab1TabletCache, "r", tab1e22, "tserver9");
}
@Test
public void test2() throws Exception {
TServers tservers = new TServers();
TabletLocatorImpl metaCache = createLocators(tservers, "tserver1", "tserver2", "foo");
KeyExtent ke1 = createNewKeyExtent("foo", "m", null);
KeyExtent ke2 = createNewKeyExtent("foo", null, "m");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, null);
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke2, "L1");
locateTabletTest(metaCache, "a", null, null);
locateTabletTest(metaCache, "r", ke2, "L1");
setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "L2");
locateTabletTest(metaCache, "a", ke1, "L2");
locateTabletTest(metaCache, "r", ke2, "L1");
}
  @Test
  public void testBinRanges1() throws Exception {
    // Bin ranges against a table consisting of a single tablet that covers the
    // entire row space; every range should bin to that one tablet on "l1".
    TabletLocatorImpl metaCache =
        createLocators("foo", createNewKeyExtent("foo", null, null), "l1");
    // an unbounded range
    List<Range> ranges = createNewRangeList(createNewRange(null, null));
    Map<String,Map<KeyExtent,List<Range>>> expected =
        createExpectedBinnings(createRangeLocation("l1", createNewKeyExtent("foo", null, null),
            createNewRangeList(createNewRange(null, null))));
    runTest(ranges, metaCache, expected);
    // a range bounded only below
    ranges = createNewRangeList(createNewRange("a", null));
    expected = createExpectedBinnings(createRangeLocation("l1",
        createNewKeyExtent("foo", null, null), createNewRangeList(createNewRange("a", null)))
    );
    runTest(ranges, metaCache, expected);
    // a range bounded only above
    ranges = createNewRangeList(createNewRange(null, "b"));
    expected = createExpectedBinnings(createRangeLocation("l1",
        createNewKeyExtent("foo", null, null), createNewRangeList(createNewRange(null, "b")))
    );
    runTest(ranges, metaCache, expected);
  }
  @Test
  public void testBinRanges2() throws Exception {
    // Bin an unbounded range against a two tablet table; the range overlaps both
    // tablets so it should appear in the binning for each tablet.
    List<Range> ranges = createNewRangeList(createNewRange(null, null));
    TabletLocatorImpl metaCache = createLocators("foo", createNewKeyExtent("foo", "g", null), "l1",
        createNewKeyExtent("foo", null, "g"), "l2");
    Map<String,
        Map<KeyExtent,List<Range>>> expected = createExpectedBinnings(
            createRangeLocation("l1", createNewKeyExtent("foo", "g", null),
                createNewRangeList(createNewRange(null, null))),
            createRangeLocation("l2", createNewKeyExtent("foo", null, "g"),
                createNewRangeList(createNewRange(null, null))));
    runTest(ranges, metaCache, expected);
  }
  @Test
  public void testBinRanges3() throws Exception {
    // Exercises binning against a three tablet table ((-inf,g], (g,m], (m,+inf))
    // with ranges that hit one, two, or all tablets, paying particular attention
    // to ranges whose endpoints coincide with tablet end rows.
    // test with three tablets and a range that covers the whole table
    List<Range> ranges = createNewRangeList(createNewRange(null, null));
    TabletLocatorImpl metaCache = createLocators("foo", createNewKeyExtent("foo", "g", null), "l1",
        createNewKeyExtent("foo", "m", "g"), "l2", createNewKeyExtent("foo", null, "m"), "l2");
    Map<String,Map<KeyExtent,List<Range>>> expected = createExpectedBinnings(
        createRangeLocation("l1", createNewKeyExtent("foo", "g", null),
            createNewRangeList(createNewRange(null, null))),
        createRangeLocation("l2", createNewKeyExtent("foo", "m", "g"),
            createNewRangeList(createNewRange(null, null)), createNewKeyExtent("foo", null, "m"),
            createNewRangeList(createNewRange(null, null))));
    runTest(ranges, metaCache, expected);
    // test with three tablets where one range falls within the first tablet and last two ranges
    // fall within the last tablet
    ranges = createNewRangeList(createNewRange(null, "c"), createNewRange("s", "y"),
        createNewRange("z", null));
    expected = createExpectedBinnings(
        createRangeLocation("l1", createNewKeyExtent("foo", "g", null),
            createNewRangeList(createNewRange(null, "c"))),
        createRangeLocation("l2", createNewKeyExtent("foo", null, "m"),
            createNewRangeList(createNewRange("s", "y"), createNewRange("z", null))));
    runTest(ranges, metaCache, expected);
    // test is same as above, but has an additional range that spans the first two tablets
    ranges = createNewRangeList(createNewRange(null, "c"), createNewRange("f", "i"),
        createNewRange("s", "y"), createNewRange("z", null));
    expected = createExpectedBinnings(
        createRangeLocation("l1", createNewKeyExtent("foo", "g", null),
            createNewRangeList(createNewRange(null, "c"), createNewRange("f", "i"))),
        createRangeLocation("l2", createNewKeyExtent("foo", "m", "g"),
            createNewRangeList(createNewRange("f", "i")), createNewKeyExtent("foo", null, "m"),
            createNewRangeList(createNewRange("s", "y"), createNewRange("z", null))));
    runTest(ranges, metaCache, expected);
    // test where start of range is not inclusive and same as tablet endRow
    // (g,m] only overlaps the middle tablet
    ranges = createNewRangeList(createNewRange("g", false, "m", true));
    expected = createExpectedBinnings(createRangeLocation("l2", createNewKeyExtent("foo", "m", "g"),
        createNewRangeList(createNewRange("g", false, "m", true))));
    runTest(ranges, metaCache, expected);
    // test where start of range is inclusive and same as tablet endRow
    // [g,m] overlaps the first tablet (which contains row g) and the middle tablet
    ranges = createNewRangeList(createNewRange("g", true, "m", true));
    expected = createExpectedBinnings(
        createRangeLocation("l1", createNewKeyExtent("foo", "g", null),
            createNewRangeList(createNewRange("g", true, "m", true))),
        createRangeLocation("l2", createNewKeyExtent("foo", "m", "g"),
            createNewRangeList(createNewRange("g", true, "m", true))));
    runTest(ranges, metaCache, expected);
    // [g,m) still overlaps both the first and middle tablets
    ranges = createNewRangeList(createNewRange("g", true, "m", false));
    expected = createExpectedBinnings(
        createRangeLocation("l1", createNewKeyExtent("foo", "g", null),
            createNewRangeList(createNewRange("g", true, "m", false))),
        createRangeLocation("l2", createNewKeyExtent("foo", "m", "g"),
            createNewRangeList(createNewRange("g", true, "m", false))));
    runTest(ranges, metaCache, expected);
    // (g,m) overlaps only the middle tablet
    ranges = createNewRangeList(createNewRange("g", false, "m", false));
    expected = createExpectedBinnings(createRangeLocation("l2", createNewKeyExtent("foo", "m", "g"),
        createNewRangeList(createNewRange("g", false, "m", false))));
    runTest(ranges, metaCache, expected);
  }
  @Test
  public void testBinRanges4() throws Exception {
    // Exercises binning with key-based (not just row-based) ranges against a five
    // tablet table split at rows 0, 1, 2, and 3.
    List<Range> ranges = createNewRangeList(new Range(new Text("1")));
    TabletLocatorImpl metaCache = createLocators("foo", createNewKeyExtent("foo", "0", null), "l1",
        createNewKeyExtent("foo", "1", "0"), "l2", createNewKeyExtent("foo", "2", "1"), "l3",
        createNewKeyExtent("foo", "3", "2"), "l4", createNewKeyExtent("foo", null, "3"), "l5");
    // a single-row range bins to exactly the tablet containing that row
    Map<String,Map<KeyExtent,List<Range>>> expected =
        createExpectedBinnings(createRangeLocation("l2", createNewKeyExtent("foo", "1", "0"),
            createNewRangeList(new Range(new Text("1")))));
    runTest(ranges, metaCache, expected);
    // a key range within row "3" with an exclusive following-row end key should
    // bin only to the tablet ending at row 3
    Key rowColKey = new Key(new Text("3"), new Text("cf1"), new Text("cq1"));
    Range range =
        new Range(rowColKey, true, new Key(new Text("3")).followingKey(PartialKey.ROW), false);
    ranges = createNewRangeList(range);
    Map<String,Map<KeyExtent,List<Range>>> expected4 = createExpectedBinnings(
        createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"), createNewRangeList(range)));
    runTest(ranges, metaCache, expected4, createNewRangeList());
    // making the following-row end key inclusive causes the range to also bin to
    // the last tablet
    range = new Range(rowColKey, true, new Key(new Text("3")).followingKey(PartialKey.ROW), true);
    ranges = createNewRangeList(range);
    Map<String,Map<KeyExtent,List<Range>>> expected5 = createExpectedBinnings(
        createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"), createNewRangeList(range)),
        createRangeLocation("l5", createNewKeyExtent("foo", null, "3"), createNewRangeList(range)));
    runTest(ranges, metaCache, expected5, createNewRangeList());
    // (2,3) overlaps only the (2,3] tablet
    range = new Range(new Text("2"), false, new Text("3"), false);
    ranges = createNewRangeList(range);
    Map<String,Map<KeyExtent,List<Range>>> expected6 = createExpectedBinnings(
        createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"), createNewRangeList(range)));
    runTest(ranges, metaCache, expected6, createNewRangeList());
    // [2,3) overlaps the tablet containing row 2 as well
    range = new Range(new Text("2"), true, new Text("3"), false);
    ranges = createNewRangeList(range);
    Map<String,Map<KeyExtent,List<Range>>> expected7 = createExpectedBinnings(
        createRangeLocation("l3", createNewKeyExtent("foo", "2", "1"), createNewRangeList(range)),
        createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"), createNewRangeList(range)));
    runTest(ranges, metaCache, expected7, createNewRangeList());
    // (2,3] overlaps only the (2,3] tablet
    range = new Range(new Text("2"), false, new Text("3"), true);
    ranges = createNewRangeList(range);
    Map<String,Map<KeyExtent,List<Range>>> expected8 = createExpectedBinnings(
        createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"), createNewRangeList(range)));
    runTest(ranges, metaCache, expected8, createNewRangeList());
    // [2,3] overlaps both tablets
    range = new Range(new Text("2"), true, new Text("3"), true);
    ranges = createNewRangeList(range);
    Map<String,Map<KeyExtent,List<Range>>> expected9 = createExpectedBinnings(
        createRangeLocation("l3", createNewKeyExtent("foo", "2", "1"), createNewRangeList(range)),
        createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"), createNewRangeList(range)));
    runTest(ranges, metaCache, expected9, createNewRangeList());
  }
  @Test
  public void testBinRanges5() throws Exception {
    // Test binning when there is a hole in the metadata
    // Tablets cover (-inf,0], (0,1], (2,3], (3,+inf) -- nothing covers (1,2].
    // Ranges that touch the hole must fail; ranges clear of it must bin normally.
    List<Range> ranges = createNewRangeList(new Range(new Text("1")));
    TabletLocatorImpl metaCache = createLocators("foo", createNewKeyExtent("foo", "0", null), "l1",
        createNewKeyExtent("foo", "1", "0"), "l2", createNewKeyExtent("foo", "3", "2"), "l4",
        createNewKeyExtent("foo", null, "3"), "l5");
    // row "1" is covered, so it bins normally
    Map<String,Map<KeyExtent,List<Range>>> expected1 =
        createExpectedBinnings(createRangeLocation("l2", createNewKeyExtent("foo", "1", "0"),
            createNewRangeList(new Range(new Text("1")))));
    runTest(ranges, metaCache, expected1);
    // rows "2" and "11" both fall in the (1,2] hole, so both ranges fail
    ranges = createNewRangeList(new Range(new Text("2")), new Range(new Text("11")));
    Map<String,Map<KeyExtent,List<Range>>> expected2 = createExpectedBinnings();
    runTest(ranges, metaCache, expected2, ranges);
    // mixed case: "1" bins, "2" fails
    ranges = createNewRangeList(new Range(new Text("1")), new Range(new Text("2")));
    runTest(ranges, metaCache, expected1, createNewRangeList(new Range(new Text("2"))));
    // [0,2] crosses the hole and fails; [3,4] spans the last two tablets and bins
    ranges = createNewRangeList(createNewRange("0", "2"), createNewRange("3", "4"));
    Map<String,
        Map<KeyExtent,List<Range>>> expected3 = createExpectedBinnings(
            createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"),
                createNewRangeList(createNewRange("3", "4"))),
            createRangeLocation("l5", createNewKeyExtent("foo", null, "3"),
                createNewRangeList(createNewRange("3", "4"))));
    runTest(ranges, metaCache, expected3, createNewRangeList(createNewRange("0", "2")));
    // mix of ranges entirely before the hole, entirely after it, and crossing it;
    // only the crossing ranges fail
    ranges = createNewRangeList(createNewRange("0", "1"), createNewRange("0", "11"),
        createNewRange("1", "2"), createNewRange("0", "4"), createNewRange("2", "4"),
        createNewRange("21", "4"));
    Map<String,
        Map<KeyExtent,List<Range>>> expected4 = createExpectedBinnings(
            createRangeLocation("l1", createNewKeyExtent("foo", "0", null),
                createNewRangeList(createNewRange("0", "1"))),
            createRangeLocation("l2", createNewKeyExtent("foo", "1", "0"),
                createNewRangeList(createNewRange("0", "1"))),
            createRangeLocation("l4", createNewKeyExtent("foo", "3", "2"),
                createNewRangeList(createNewRange("21", "4"))),
            createRangeLocation("l5", createNewKeyExtent("foo", null, "3"),
                createNewRangeList(createNewRange("21", "4"))));
    runTest(ranges, metaCache, expected4, createNewRangeList(createNewRange("0", "11"),
        createNewRange("1", "2"), createNewRange("0", "4"), createNewRange("2", "4")));
  }
  @Test
  public void testBinRangesNonContiguousExtents() throws Exception {
    // This test exercises a bug that was seen in the tablet locator code.
    // The sequence of setLocation/runTest calls below is order-sensitive: it first
    // primes the cache with e1 and e2, then introduces overlapping extent e3.
    KeyExtent e1 = createNewKeyExtent("foo", "05", null);
    KeyExtent e2 = createNewKeyExtent("foo", "1", "05");
    KeyExtent e3 = createNewKeyExtent("foo", "2", "05");
    TServers tservers = new TServers();
    TabletLocatorImpl metaCache =
        createLocators(tservers, "tserver1", "tserver2", "foo", e1, "l1", e2, "l1");
    List<Range> ranges = createNewRangeList(createNewRange("01", "07"));
    Map<String,
        Map<KeyExtent,List<Range>>> expected = createExpectedBinnings(
            createRangeLocation("l1", e1, createNewRangeList(createNewRange("01", "07"))),
            createRangeLocation("l1", e2, createNewRangeList(createNewRange("01", "07"))));
    // The following will result in extents e1 and e2 being placed in the cache.
    runTest(ranges, metaCache, expected, createNewRangeList());
    // Add e3 to the metadata table. Extent e3 could not be added earlier in the test because it
    // overlaps e2. If e2 and e3 are seen in the same metadata read then one will be removed from
    // the cache because the cache can never contain overlapping extents.
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, e3, "l1");
    // The following test reproduces a bug. Extents e1 and e2 are in the cache. Extent e3 overlaps
    // e2 but is not in the cache. The range used by the test overlaps e1,e2,and e3. The bug was
    // that for this situation the binRanges code in tablet locator used to return e1,e2,and e3. The
    // desired behavior is that the range fails for this situation. This tablet locator bug caused
    // the batch scanner to return duplicate data.
    ranges = createNewRangeList(createNewRange("01", "17"));
    runTest(ranges, metaCache, new HashMap<>(), createNewRangeList(createNewRange("01", "17")));
    // After the above test fails it should cause e3 to be added to the cache. Because e3 overlaps
    // e2, when e3 is added then e2 is removed. Therefore, the following binRanges call should
    // succeed and find the range overlaps e1 and e3.
    expected = createExpectedBinnings(
        createRangeLocation("l1", e1, createNewRangeList(createNewRange("01", "17"))),
        createRangeLocation("l1", e3, createNewRangeList(createNewRange("01", "17"))));
    runTest(ranges, metaCache, expected, createNewRangeList());
  }
  @Test
  public void testBinRangesNonContiguousExtentsAndMultipleRanges() throws Exception {
    // Like testBinRangesNonContiguousExtents, but with several ranges at once:
    // ranges that cross extents overlapping cached entries (e3, e6) must fail
    // until those extents make it into the cache, while unrelated ranges succeed.
    KeyExtent e1 = createNewKeyExtent("foo", "c", null);
    KeyExtent e2 = createNewKeyExtent("foo", "g", "c");
    KeyExtent e3 = createNewKeyExtent("foo", "k", "c");
    KeyExtent e4 = createNewKeyExtent("foo", "n", "k");
    KeyExtent e5 = createNewKeyExtent("foo", "q", "n");
    KeyExtent e6 = createNewKeyExtent("foo", "s", "n");
    KeyExtent e7 = createNewKeyExtent("foo", null, "s");
    TServers tservers = new TServers();
    // e3 and e6 are intentionally left out of the initial metadata because they
    // overlap e2 and e4/e5 respectively
    TabletLocatorImpl metaCache = createLocators(tservers, "tserver1", "tserver2", "foo", e1, "l1",
        e2, "l1", e4, "l1", e5, "l1", e7, "l1");
    Range r1 = createNewRange("art", "cooking"); // overlaps e1 e2
    Range r2 = createNewRange("loop", "nope"); // overlaps e4 e5
    Range r3 = createNewRange("silly", "sunny"); // overlaps e7
    Map<String,Map<KeyExtent,List<Range>>> expected = createExpectedBinnings(
        createRangeLocation("l1", e1, createNewRangeList(r1)),
        createRangeLocation("l1", e2, createNewRangeList(r1)),
        createRangeLocation("l1", e4, createNewRangeList(r2)),
        createRangeLocation("l1", e5, createNewRangeList(r2)),
        createRangeLocation("l1", e7, createNewRangeList(r3)));
    runTest(createNewRangeList(r1, r2, r3), metaCache, expected, createNewRangeList());
    // introduce e3, which overlaps cached extent e2
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, e3, "l1");
    Range r4 = createNewRange("art", "good"); // overlaps e1 e3
    Range r5 = createNewRange("gum", "run"); // overlaps e3 e4 e6
    // r4 and r5 fail because their extents are not all cached yet; r3 still bins
    expected = createExpectedBinnings(createRangeLocation("l1", e7, createNewRangeList(r3)));
    runTest(createNewRangeList(r4, r5, r3), metaCache, expected, createNewRangeList(r4, r5));
    // introduce e6, which overlaps cached extent e5
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, e6, "l1");
    // r4 now bins (e3 made it into the cache); r5 still fails pending e6
    expected = createExpectedBinnings(createRangeLocation("l1", e1, createNewRangeList(r4)),
        createRangeLocation("l1", e3, createNewRangeList(r4)),
        createRangeLocation("l1", e7, createNewRangeList(r3)));
    runTest(createNewRangeList(r4, r5, r3), metaCache, expected, createNewRangeList(r5));
    // with e3 and e6 both cached, everything bins
    expected = createExpectedBinnings(createRangeLocation("l1", e1, createNewRangeList(r4)),
        createRangeLocation("l1", e3, createNewRangeList(r4, r5)),
        createRangeLocation("l1", e4, createNewRangeList(r5)),
        createRangeLocation("l1", e6, createNewRangeList(r5)),
        createRangeLocation("l1", e7, createNewRangeList(r3)));
    runTest(createNewRangeList(r4, r5, r3), metaCache, expected, createNewRangeList());
  }
@Test
public void testIsContiguous() {
TabletLocation e1 = new TabletLocation(createNewKeyExtent("foo", "1", null), "l1", "1");
TabletLocation e2 = new TabletLocation(createNewKeyExtent("foo", "2", "1"), "l1", "1");
TabletLocation e3 = new TabletLocation(createNewKeyExtent("foo", "3", "2"), "l1", "1");
TabletLocation e4 = new TabletLocation(createNewKeyExtent("foo", null, "3"), "l1", "1");
assertTrue(TabletLocatorImpl.isContiguous(List.of(e1, e2, e3, e4)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e1, e2, e3)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e2, e3, e4)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e2, e3)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e1)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e2)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e4)));
assertFalse(TabletLocatorImpl.isContiguous(List.of(e1, e2, e4)));
assertFalse(TabletLocatorImpl.isContiguous(List.of(e1, e3, e4)));
TabletLocation e5 = new TabletLocation(createNewKeyExtent("foo", null, null), "l1", "1");
assertFalse(TabletLocatorImpl.isContiguous(List.of(e1, e2, e3, e4, e5)));
assertFalse(TabletLocatorImpl.isContiguous(List.of(e5, e1, e2, e3, e4)));
assertFalse(TabletLocatorImpl.isContiguous(List.of(e1, e2, e3, e5)));
assertFalse(TabletLocatorImpl.isContiguous(List.of(e5, e2, e3, e4)));
assertTrue(TabletLocatorImpl.isContiguous(List.of(e5)));
TabletLocation e6 = new TabletLocation(createNewKeyExtent("foo", null, "1"), "l1", "1");
assertFalse(TabletLocatorImpl.isContiguous(List.of(e1, e2, e3, e6)));
TabletLocation e7 = new TabletLocation(createNewKeyExtent("foo", "33", "11"), "l1", "1");
assertFalse(TabletLocatorImpl.isContiguous(List.of(e1, e2, e7, e4)));
}
  @Test
  public void testBinMutations1() throws Exception {
    // one tablet table
    // Every mutation should bin to the single tablet regardless of its row.
    KeyExtent ke1 = createNewKeyExtent("foo", null, null);
    TabletLocatorImpl metaCache = createLocators("foo", ke1, "l1");
    // two mutations with different rows
    List<Mutation> ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("c", "cf1:cq1=v3", "cf1:cq2=v4"));
    Map<String,Map<KeyExtent,List<String>>> emb = createServerExtentMap(
        createServerExtent("a", "l1", ke1), createServerExtent("c", "l1", ke1));
    runTest(metaCache, ml, emb);
    // a single mutation
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb);
    // two mutations with the same row
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("a", "cf1:cq3=v3"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb);
  }
@Test
public void testBinMutations2() throws Exception {
// no tablets for table
TabletLocatorImpl metaCache = createLocators("foo");
List<Mutation> ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
createNewMutation("c", "cf1:cq1=v3", "cf1:cq2=v4"));
Map<String,Map<KeyExtent,List<String>>> emb = createServerExtentMap();
runTest(metaCache, ml, emb, "a", "c");
}
  @Test
  public void testBinMutations3() throws Exception {
    // three tablet table
    // Tablets cover (-inf,h], (h,t], (t,+inf) on servers l1, l2, l3; mutations
    // should bin to the tablet containing their row.
    KeyExtent ke1 = createNewKeyExtent("foo", "h", null);
    KeyExtent ke2 = createNewKeyExtent("foo", "t", "h");
    KeyExtent ke3 = createNewKeyExtent("foo", null, "t");
    TabletLocatorImpl metaCache = createLocators("foo", ke1, "l1", ke2, "l2", ke3, "l3");
    // rows in the first and middle tablets
    List<Mutation> ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("i", "cf1:cq1=v3", "cf1:cq2=v4"));
    Map<String,Map<KeyExtent,List<String>>> emb = createServerExtentMap(
        createServerExtent("a", "l1", ke1), createServerExtent("i", "l2", ke2));
    runTest(metaCache, ml, emb);
    // a single mutation
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb);
    // two mutations with the same row
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("a", "cf1:cq3=v3"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb);
    // rows in the first and last tablets
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("w", "cf1:cq3=v3"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("w", "l3", ke3));
    runTest(metaCache, ml, emb);
    // two rows binning to the same last tablet
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("w", "cf1:cq3=v3"), createNewMutation("z", "cf1:cq4=v4"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("w", "l3", ke3), createServerExtent("z", "l3", ke3));
    runTest(metaCache, ml, emb);
    // rows equal to tablet end rows bin to the tablet they end
    ml = createNewMutationList(createNewMutation("h", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("t", "cf1:cq1=v1", "cf1:cq2=v2"));
    emb = createServerExtentMap(createServerExtent("h", "l1", ke1),
        createServerExtent("t", "l2", ke2));
    runTest(metaCache, ml, emb);
  }
  @Test
  public void testBinMutations4() throws Exception {
    // three tablet table with a hole: tablets cover (-inf,h] and (t,+inf), so
    // mutations with rows in (h,t] cannot be binned and must be reported as failures
    KeyExtent ke1 = createNewKeyExtent("foo", "h", null);
    KeyExtent ke3 = createNewKeyExtent("foo", null, "t");
    TabletLocatorImpl metaCache = createLocators("foo", ke1, "l1", ke3, "l3");
    // row "i" falls in the hole and fails; row "a" bins normally
    List<Mutation> ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("i", "cf1:cq1=v3", "cf1:cq2=v4"));
    Map<String,Map<KeyExtent,List<String>>> emb =
        createServerExtentMap(createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb, "i");
    // a single mutation clear of the hole
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb);
    // two mutations with the same row
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("a", "cf1:cq3=v3"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("a", "l1", ke1));
    runTest(metaCache, ml, emb);
    // rows on either side of the hole bin normally
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("w", "cf1:cq3=v3"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("w", "l3", ke3));
    runTest(metaCache, ml, emb);
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("w", "cf1:cq3=v3"), createNewMutation("z", "cf1:cq4=v4"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("w", "l3", ke3), createServerExtent("z", "l3", ke3));
    runTest(metaCache, ml, emb);
    // row "t" is the hole's upper bound and is not covered by either tablet, so it fails
    ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("w", "cf1:cq3=v3"), createNewMutation("z", "cf1:cq4=v4"),
        createNewMutation("t", "cf1:cq5=v5"));
    emb = createServerExtentMap(createServerExtent("a", "l1", ke1),
        createServerExtent("w", "l3", ke3), createServerExtent("z", "l3", ke3));
    runTest(metaCache, ml, emb, "t");
  }
  @Test
  public void testBinSplit() throws Exception {
    // try binning mutations and ranges when a tablet splits
    for (int i = 0; i < 3; i++) {
      // when i == 0 only test binning mutations
      // when i == 1 only test binning ranges
      // when i == 2 test both
      KeyExtent ke1 = createNewKeyExtent("foo", null, null);
      TServers tservers = new TServers();
      TabletLocatorImpl metaCache =
          createLocators(tservers, "tserver1", "tserver2", "foo", ke1, "l1");
      // before the split, all three rows bin to the single tablet
      List<Mutation> ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
          createNewMutation("m", "cf1:cq1=v3", "cf1:cq2=v4"), createNewMutation("z", "cf1:cq1=v5"));
      Map<String,Map<KeyExtent,List<String>>> emb =
          createServerExtentMap(createServerExtent("a", "l1", ke1),
              createServerExtent("m", "l1", ke1), createServerExtent("z", "l1", ke1));
      if (i == 0 || i == 2) {
        runTest(metaCache, ml, emb);
      }
      List<Range> ranges = createNewRangeList(new Range(new Text("a")), new Range(new Text("m")),
          new Range(new Text("z")));
      Map<String,Map<KeyExtent,List<Range>>> expected1 = createExpectedBinnings(
          createRangeLocation("l1", createNewKeyExtent("foo", null, null), ranges));
      if (i == 1 || i == 2) {
        runTest(ranges, metaCache, expected1);
      }
      // split ke1 at "n"; only the second half (ke12) is in the metadata so far,
      // mimicking a split that is only partially recorded
      KeyExtent ke11 = createNewKeyExtent("foo", "n", null);
      KeyExtent ke12 = createNewKeyExtent("foo", null, "n");
      setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke12, "l2");
      metaCache.invalidateCache(ke1);
      // only row "z" can be binned; "a" and "m" have no covering tablet yet
      emb = createServerExtentMap(createServerExtent("z", "l2", ke12));
      if (i == 0 || i == 2) {
        runTest(metaCache, ml, emb, "a", "m");
      }
      Map<String,Map<KeyExtent,List<Range>>> expected2 =
          createExpectedBinnings(createRangeLocation("l2", createNewKeyExtent("foo", null, "n"),
              createNewRangeList(new Range(new Text("z")))));
      if (i == 1 || i == 2) {
        runTest(ranges, metaCache, expected2,
            createNewRangeList(new Range(new Text("a")), new Range(new Text("m"))));
      }
      // once the first half (ke11) shows up in the metadata, everything bins again
      setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke11, "l3");
      emb = createServerExtentMap(createServerExtent("a", "l3", ke11),
          createServerExtent("m", "l3", ke11), createServerExtent("z", "l2", ke12));
      if (i == 0 || i == 2) {
        runTest(metaCache, ml, emb);
      }
      Map<String,
          Map<KeyExtent,List<Range>>> expected3 = createExpectedBinnings(
              createRangeLocation("l2", createNewKeyExtent("foo", null, "n"),
                  createNewRangeList(new Range(new Text("z")))),
              createRangeLocation("l3", createNewKeyExtent("foo", "n", null),
                  createNewRangeList(new Range(new Text("a")), new Range(new Text("m"))))
      );
      if (i == 1 || i == 2) {
        runTest(ranges, metaCache, expected3);
      }
    }
  }
  @Test
  public void testBug1() throws Exception {
    // a bug that occurred while running continuous ingest
    // Two metadata tablets split at row "0;0bc", with user tablets whose metadata
    // entries straddle that split point.
    KeyExtent mte1 = new KeyExtent(MetadataTable.ID, new Text("0;0bc"), ROOT_TABLE_EXTENT.endRow());
    KeyExtent mte2 = new KeyExtent(MetadataTable.ID, null, new Text("0;0bc"));
    TServers tservers = new TServers();
    TestTabletLocationObtainer ttlo = new TestTabletLocationObtainer(tservers);
    RootTabletLocator rtl = new TestRootTabletLocator();
    TabletLocatorImpl rootTabletCache =
        new TabletLocatorImpl(MetadataTable.ID, rtl, ttlo, new YesLockChecker());
    TabletLocatorImpl tab0TabletCache =
        new TabletLocatorImpl(TableId.of("0"), rootTabletCache, ttlo, new YesLockChecker());
    // root tablet points at the two metadata tablets
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte1, "tserver2");
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte2, "tserver3");
    // create two tablets that straddle a metadata split point
    KeyExtent ke1 = new KeyExtent(TableId.of("0"), new Text("0bbf20e"), null);
    KeyExtent ke2 = new KeyExtent(TableId.of("0"), new Text("0bc0756"), new Text("0bbf20e"));
    setLocation(tservers, "tserver2", mte1, ke1, "tserver4");
    setLocation(tservers, "tserver3", mte2, ke2, "tserver5");
    // look up something that comes after the last entry in mte1
    locateTabletTest(tab0TabletCache, "0bbff", ke2, "tserver5");
  }
  @Test
  public void testBug2() throws Exception {
    // a bug that occurred while running a functional test
    // The second metadata tablet exists but contains no entries for table 0, so
    // the lookup at the end should find no location and return null.
    KeyExtent mte1 = new KeyExtent(MetadataTable.ID, new Text("~"), ROOT_TABLE_EXTENT.endRow());
    KeyExtent mte2 = new KeyExtent(MetadataTable.ID, null, new Text("~"));
    TServers tservers = new TServers();
    TestTabletLocationObtainer ttlo = new TestTabletLocationObtainer(tservers);
    RootTabletLocator rtl = new TestRootTabletLocator();
    TabletLocatorImpl rootTabletCache =
        new TabletLocatorImpl(MetadataTable.ID, rtl, ttlo, new YesLockChecker());
    TabletLocatorImpl tab0TabletCache =
        new TabletLocatorImpl(TableId.of("0"), rootTabletCache, ttlo, new YesLockChecker());
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte1, "tserver2");
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte2, "tserver3");
    // create the ~ tablet so it exists
    Map<KeyExtent,SortedMap<Key,Value>> ts3 = new HashMap<>();
    ts3.put(mte2, new TreeMap<>());
    tservers.tservers.put("tserver3", ts3);
    assertNull(tab0TabletCache.locateTablet(context, new Text("row_0000000000"), false, false));
  }
  // this test reproduces a problem where empty metadata tablets, that were created by user tablets
  // being merged away, caused locating tablets to fail
  @Test
  public void testBug3() throws Exception {
    // five metadata tablets, four of which are empty (their user tablets were
    // merged away); only the last one holds the entry for table 1's sole tablet
    KeyExtent mte1 = new KeyExtent(MetadataTable.ID, new Text("1;c"), ROOT_TABLE_EXTENT.endRow());
    KeyExtent mte2 = new KeyExtent(MetadataTable.ID, new Text("1;f"), new Text("1;c"));
    KeyExtent mte3 = new KeyExtent(MetadataTable.ID, new Text("1;j"), new Text("1;f"));
    KeyExtent mte4 = new KeyExtent(MetadataTable.ID, new Text("1;r"), new Text("1;j"));
    KeyExtent mte5 = new KeyExtent(MetadataTable.ID, null, new Text("1;r"));
    KeyExtent ke1 = new KeyExtent(TableId.of("1"), null, null);
    TServers tservers = new TServers();
    TestTabletLocationObtainer ttlo = new TestTabletLocationObtainer(tservers);
    RootTabletLocator rtl = new TestRootTabletLocator();
    TabletLocatorImpl rootTabletCache =
        new TabletLocatorImpl(MetadataTable.ID, rtl, ttlo, new YesLockChecker());
    TabletLocatorImpl tab0TabletCache =
        new TabletLocatorImpl(TableId.of("1"), rootTabletCache, ttlo, new YesLockChecker());
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte1, "tserver2");
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte2, "tserver3");
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte3, "tserver4");
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte4, "tserver5");
    setLocation(tservers, "tserver1", ROOT_TABLE_EXTENT, mte5, "tserver6");
    // the first four metadata tablets exist but contain no entries
    createEmptyTablet(tservers, "tserver2", mte1);
    createEmptyTablet(tservers, "tserver3", mte2);
    createEmptyTablet(tservers, "tserver4", mte3);
    createEmptyTablet(tservers, "tserver5", mte4);
    setLocation(tservers, "tserver6", mte5, ke1, "tserver7");
    // the locator must skip past the empty metadata tablets to find the entry
    locateTabletTest(tab0TabletCache, "a", ke1, "tserver7");
  }
  @Test
  public void testAccumulo1248() {
    // Regression test for ACCUMULO-1248: a tablet with multiple locations in the
    // metadata must cause an IllegalStateException, not silently cache one location.
    TServers tservers = new TServers();
    TabletLocatorImpl metaCache = createLocators(tservers, "tserver1", "tserver2", "foo");
    KeyExtent ke1 = createNewKeyExtent("foo", null, null);
    // set two locations for a tablet, this is not supposed to happen. The metadata cache should
    // throw an exception if it sees this rather than caching one of
    // the locations.
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "L1", "I1");
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "L2", "I2");
    var e = assertThrows(IllegalStateException.class,
        () -> metaCache.locateTablet(context, new Text("a"), false, false));
    assertTrue(e.getMessage().startsWith("Tablet has multiple locations : "));
  }
  /**
   * Verifies that the locator honors tablet server lock state: a cached location is only returned
   * while {@code isLockHeld} reports its lock as active, and metadata entries left behind by a
   * server whose lock was lost are skipped in favor of newer entries. Exercises single lookups,
   * mutation binning, and range binning. NOTE(review): the calls below are strictly
   * order-dependent (cache state carries over between steps), so the sequence must not be
   * reordered.
   */
  @Test
  public void testLostLock() throws Exception {
    // Set of "server:session" strings the lock checker below treats as currently-held locks.
    final HashSet<String> activeLocks = new HashSet<>();

    TServers tservers = new TServers();
    TabletLocatorImpl metaCache =
        createLocators(tservers, "tserver1", "tserver2", "foo", new TabletServerLockChecker() {
          @Override
          public boolean isLockHeld(String tserver, String session) {
            return activeLocks.contains(tserver + ":" + session);
          }

          @Override
          public void invalidateCache(String server) {}
        });

    KeyExtent ke1 = createNewKeyExtent("foo", null, null);

    // Location L1 (session 5) resolves only while its lock is held; once the lock is lost the
    // metadata entry still exists, but lookups must return no location.
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "L1", "5");
    activeLocks.add("L1:5");

    locateTabletTest(metaCache, "a", ke1, "L1");
    locateTabletTest(metaCache, "a", ke1, "L1");

    activeLocks.clear();

    locateTabletTest(metaCache, "a", null, null);
    locateTabletTest(metaCache, "a", null, null);
    locateTabletTest(metaCache, "a", null, null);

    // Replace the dead entry with L2 (session 6); lookups succeed again while L2's lock is held.
    clearLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "5");
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "L2", "6");

    activeLocks.add("L2:6");

    locateTabletTest(metaCache, "a", ke1, "L2");
    locateTabletTest(metaCache, "a", ke1, "L2");

    // Clearing the metadata entry does not evict the cached L2 location while its lock is held.
    clearLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "6");

    locateTabletTest(metaCache, "a", ke1, "L2");

    // A newer entry (L3, session 7) is ignored while the cached L2 lock is still active.
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "L3", "7");

    locateTabletTest(metaCache, "a", ke1, "L2");

    // Once L2's lock is lost, the cache invalidates and picks up L3 only after its lock is held.
    activeLocks.clear();

    locateTabletTest(metaCache, "a", null, null);
    locateTabletTest(metaCache, "a", null, null);

    activeLocks.add("L3:7");

    locateTabletTest(metaCache, "a", ke1, "L3");
    locateTabletTest(metaCache, "a", ke1, "L3");

    // Mutation binning: with L3's lock held, both mutations bin to L3; losing the lock causes
    // the mutations to fail (reported via the trailing varargs of runTest).
    List<Mutation> ml = createNewMutationList(createNewMutation("a", "cf1:cq1=v1", "cf1:cq2=v2"),
        createNewMutation("w", "cf1:cq3=v3"));
    Map<String,Map<KeyExtent,List<String>>> emb = createServerExtentMap(
        createServerExtent("a", "L3", ke1), createServerExtent("w", "L3", ke1));
    runTest(metaCache, ml, emb);

    clearLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke1, "7");
    runTest(metaCache, ml, emb);

    activeLocks.clear();
    emb.clear();
    runTest(metaCache, ml, emb, "a", "w");
    runTest(metaCache, ml, emb, "a", "w");

    // Split the table into two tablets (ke11, ke12); mutations only bin to a tablet whose
    // server's lock is active.
    KeyExtent ke11 = createNewKeyExtent("foo", "m", null);
    KeyExtent ke12 = createNewKeyExtent("foo", null, "m");
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke11, "L1", "8");
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke12, "L2", "9");

    runTest(metaCache, ml, emb, "a", "w");

    activeLocks.add("L1:8");

    emb = createServerExtentMap(createServerExtent("a", "L1", ke11));
    runTest(metaCache, ml, emb, "w");

    activeLocks.add("L2:9");

    emb = createServerExtentMap(createServerExtent("a", "L1", ke11),
        createServerExtent("w", "L2", ke12));
    runTest(metaCache, ml, emb);

    // Range binning: ranges only bin to servers whose lock is active; unbinnable ranges are
    // reported via the trailing list argument of runTest.
    List<Range> ranges =
        createNewRangeList(new Range("a"), createNewRange("b", "o"), createNewRange("r", "z"));
    Map<String,
        Map<KeyExtent,List<Range>>> expected = createExpectedBinnings(
            createRangeLocation("L1", ke11,
                createNewRangeList(new Range("a"), createNewRange("b", "o"))),
            createRangeLocation("L2", ke12,
                createNewRangeList(createNewRange("b", "o"), createNewRange("r", "z"))));

    runTest(ranges, metaCache, expected);

    activeLocks.remove("L2:9");

    expected =
        createExpectedBinnings(createRangeLocation("L1", ke11, createNewRangeList(new Range("a"))));

    runTest(ranges, metaCache, expected,
        createNewRangeList(createNewRange("b", "o"), createNewRange("r", "z")));

    activeLocks.clear();

    expected = createExpectedBinnings();

    runTest(ranges, metaCache, expected,
        createNewRangeList(new Range("a"), createNewRange("b", "o"), createNewRange("r", "z")));

    // Replace both locations (L3 session 10, L4 session 11); binning resumes per-tablet as each
    // new lock becomes active.
    clearLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke11, "8");
    clearLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke12, "9");
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke11, "L3", "10");
    setLocation(tservers, "tserver2", METADATA_TABLE_EXTENT, ke12, "L4", "11");

    runTest(ranges, metaCache, expected,
        createNewRangeList(new Range("a"), createNewRange("b", "o"), createNewRange("r", "z")));

    activeLocks.add("L3:10");

    expected =
        createExpectedBinnings(createRangeLocation("L3", ke11, createNewRangeList(new Range("a"))));

    runTest(ranges, metaCache, expected,
        createNewRangeList(createNewRange("b", "o"), createNewRange("r", "z")));

    activeLocks.add("L4:11");

    expected = createExpectedBinnings(
        createRangeLocation("L3", ke11,
            createNewRangeList(new Range("a"), createNewRange("b", "o"))),
        createRangeLocation("L4", ke12,
            createNewRangeList(createNewRange("b", "o"), createNewRange("r", "z"))));

    runTest(ranges, metaCache, expected);
  }
}
| 9,399 |
// NOTE(review): removed stray non-source text ("Subsets and Splits" dataset-viewer boilerplate)
// that was accidentally appended after the closing brace; it is not Java and would not compile.