index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/json/ExistenceModifier.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.json;
import lombok.Getter;
import lombok.ToString;
import redis.clients.jedis.commands.ProtocolCommand;
import redis.clients.jedis.util.SafeEncoder;
@ToString
@Getter
/**
 * Existence constraints for JSON.SET-style commands: require the target path
 * to not exist ("NX"), to already exist ("XX"), or impose no constraint.
 */
@ToString
@Getter
public enum ExistenceModifier implements ProtocolCommand {
    /** No existence constraint. */
    DEFAULT(""),
    /** Write only if the path does not already exist (NX). */
    NOT_EXISTS("NX"),
    /** Write only if the path already exists (XX). */
    MUST_EXIST("XX");

    // Raw bytes for this modifier as sent on the Redis wire protocol.
    private final byte[] raw;

    ExistenceModifier(final String keyword) {
        raw = SafeEncoder.encode(keyword);
    }
}
| 6,100 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/json/JsonCommand.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.json;
import lombok.Getter;
import redis.clients.jedis.commands.ProtocolCommand;
import redis.clients.jedis.util.SafeEncoder;
@Getter
/**
 * RedisJSON module commands supported by the Dyno recipes, each mapped to its
 * wire-protocol command name.
 */
@Getter
public enum JsonCommand implements ProtocolCommand {
    SET("JSON.SET"),
    GET("JSON.GET"),
    DEL("JSON.DEL"),
    TYPE("JSON.TYPE"),
    MGET("JSON.MGET"),
    ARRAPPEND("JSON.ARRAPPEND"),
    ARRINSERT("JSON.ARRINSERT"),
    ARRLEN("JSON.ARRLEN"),
    OBJKEYS("JSON.OBJKEYS"),
    OBJLEN("JSON.OBJLEN");

    // Command name pre-encoded to the raw bytes sent on the wire.
    private final byte[] raw;

    JsonCommand(final String commandName) {
        raw = SafeEncoder.encode(commandName);
    }
}
| 6,101 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/json/JsonPath.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.json;
/**
 * Mutable builder for RedisJSON path expressions, e.g. {@code .foo.bar[2]}.
 */
public class JsonPath {

    private static final String PATH_DELIMITER = ".";

    /* package private */ static final JsonPath ROOT_PATH = new JsonPath(".");

    private final StringBuilder pathBuilder;

    /** Creates an empty path; the first appended sub-key is prefixed with ".". */
    public JsonPath() {
        pathBuilder = new StringBuilder();
    }

    /**
     * Creates a path seeded with the given prefix.
     *
     * @param path initial path expression
     */
    public JsonPath(String path) {
        pathBuilder = new StringBuilder().append(path);
    }

    /**
     * Appends a sub-key, inserting the "." delimiter unless the current path is
     * exactly the root path ".".
     *
     * @param subKey object member name to append
     * @return this path, for chaining
     */
    public JsonPath appendSubKey(String subKey) {
        // Fix: StringBuilder does not override equals(), so the original
        // reference comparison (pathBuilder.equals(ROOT_PATH.pathBuilder)) was
        // false for every instance except ROOT_PATH itself, producing
        // "..subKey" for any path constructed from ".". Compare content instead.
        if (PATH_DELIMITER.contentEquals(pathBuilder)) {
            pathBuilder.append(subKey);
        } else {
            pathBuilder.append(PATH_DELIMITER).append(subKey);
        }
        return this;
    }

    /**
     * Appends an array index accessor, e.g. {@code [3]}.
     *
     * @param index array index to append
     * @return this path, for chaining
     */
    public JsonPath atIndex(int index) {
        pathBuilder.append('[').append(index).append(']');
        return this;
    }

    @Override
    public String toString() {
        return pathBuilder.toString();
    }
}
| 6,102 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/counter/DynoCounter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.counter;
import java.util.List;
/**
 * A counter whose semantics mimic that of an in-memory counter. The counter supports incrementing by one or
 * by an arbitrary natural number.
 * <p>
 * This counter has a lifecycle; callers must invoke {@link #initialize()} prior to all other methods except
 * {@link #getKey()}, otherwise an {@link IllegalStateException} may be thrown. To properly shut down the
 * counter, callers must invoke {@link #close}.
 * </p>
 *
 * @author jcacciatore
 * @see AutoCloseable
 */
public interface DynoCounter extends AutoCloseable {
    /**
     * Initializes the counter. Must be called before any other method except
     * {@link #getKey()}.
     */
    void initialize();
    /**
     * Increments the counter instance by one.
     */
    void incr();
    /**
     * Increments the counter instance by the given value.
     *
     * @param value amount to add to the counter
     */
    void incrBy(long value);
    /**
     * Retrieves the value of the counter instance. The value is the sum of the values of each individual key.
     *
     * @return {@link Long} the current total across all shard keys
     */
    Long get();
    /**
     * The key of the counter instance.
     *
     * @return String representation of the key
     */
    String getKey();
    /**
     * Returns the keys of all shards of the counter instance.
     *
     * @return The keys of all shards of the counter instance.
     */
    List<String> getGeneratedKeys();
}
| 6,103 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/counter/DynoJedisBatchCounter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.counter;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.dyno.jedis.DynoJedisClient;
import javax.annotation.concurrent.ThreadSafe;
/**
 * Batch implementation of {@link DynoCounter} that uses an in-memory counter to
 * track {@link #incr()} calls and flushes the value at the given frequency.
 */
@ThreadSafe
public class DynoJedisBatchCounter implements DynoCounter {

    // Guards one-time start of the scheduled flush task.
    private final AtomicBoolean initialized = new AtomicBoolean(false);

    // Accumulates increments locally between scheduled flushes.
    private final AtomicLong localCounter;

    private final AtomicReference<DynoJedisCounter> counter = new AtomicReference<DynoJedisCounter>(null);

    // Interval, in milliseconds, between flushes of the local count to the backing counter.
    private final Long frequencyInMillis;

    private final ScheduledExecutorService counterThreadPool = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "DynoJedisBatchCounter-Poller");
        }
    });

    /**
     * @param key               logical counter key (sharded by the underlying counter)
     * @param client            client used to talk to Dynomite
     * @param frequencyInMillis flush interval in milliseconds
     */
    public DynoJedisBatchCounter(String key, DynoJedisClient client, Long frequencyInMillis) {
        this.counter.compareAndSet(null, new DynoJedisCounter(key, client));
        this.localCounter = new AtomicLong(0L);
        this.frequencyInMillis = frequencyInMillis;
    }

    /**
     * Starts the periodic flush task. Idempotent: only the first call has any effect.
     */
    @Override
    public void initialize() {
        if (initialized.compareAndSet(false, true)) {
            counter.get().initialize();
            counterThreadPool.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    // getAndSet(0) ensures increments arriving concurrently with
                    // a flush are not lost; they are picked up by the next run.
                    if (localCounter.get() > 0) {
                        counter.get().incrBy(localCounter.getAndSet(0));
                    }
                }
            }, 1000, frequencyInMillis, TimeUnit.MILLISECONDS);
        }
    }

    @Override
    public void incr() {
        if (!initialized.get()) {
            throw new IllegalStateException("Counter has not been initialized");
        }
        this.localCounter.incrementAndGet();
    }

    @Override
    public void incrBy(long value) {
        if (!initialized.get()) {
            throw new IllegalStateException("Counter has not been initialized");
        }
        this.localCounter.addAndGet(value);
    }

    /**
     * Returns the flushed total; increments still buffered locally are not included.
     */
    @Override
    public Long get() {
        return counter.get().get();
    }

    @Override
    public String getKey() {
        return counter.get().getKey();
    }

    @Override
    public List<String> getGeneratedKeys() {
        return counter.get().getGeneratedKeys();
    }

    @Override
    public void close() throws Exception {
        try {
            counterThreadPool.shutdownNow();
        } catch (Throwable th) {
            // ignore: best-effort shutdown of the flush task
        }
        // Fix: flush any increments accumulated since the last scheduled run so
        // they are not silently dropped on shutdown.
        final long pending = localCounter.getAndSet(0L);
        if (pending > 0 && initialized.get()) {
            counter.get().incrBy(pending);
        }
    }
}
| 6,104 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/counter/DynoJedisCounter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.counter;
import com.netflix.dyno.connectionpool.TokenPoolTopology;
import com.netflix.dyno.connectionpool.TopologyView;
import com.netflix.dyno.jedis.DynoJedisClient;
import org.slf4j.LoggerFactory;
import javax.annotation.concurrent.ThreadSafe;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Synchronous implementation of a {@link DynoCounter}. This class is the base
 * class for other implementations as it contains the logic to shard the counter
 * key.
 * <p>
 * All DynoJedis*Counter implementations are predicated upon Dynomite's features in conjunction with
 * Redis's atomic increment functionality.
 * </p>
 *
 * @author jcacciatore
 * @see <a href="http://redis.io/commands/INCR">Redis INCR</a>
 */
@ThreadSafe
public class DynoJedisCounter implements DynoCounter {

    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(DynoJedisCounter.class);

    // Upper bound on key-generation probes; guards against never finding a
    // candidate key for every token.
    private static final int MAX_ITERATIONS = 1000;

    protected final String key;
    protected final DynoJedisClient client;
    // One generated key per cluster token; increments are spread across these.
    protected final List<String> generatedKeys;

    public DynoJedisCounter(String key, DynoJedisClient client) {
        this.key = key;
        this.client = client;
        this.generatedKeys = generateKeys();
    }

    @Override
    public void initialize() {
        // set Lifecycle state
    }

    /** Atomically increments one randomly chosen shard key by one. */
    public void incr() {
        client.incr(generatedKeys.get(randomIntFrom0toN()));
    }

    /** Atomically increments one randomly chosen shard key by {@code value}. */
    public void incrBy(long value) {
        client.incrBy(generatedKeys.get(randomIntFrom0toN()), value);
    }

    /**
     * Sums the values of all shard keys; keys missing from the datastore are
     * treated as zero.
     */
    public Long get() {
        long result = 0L;
        ArrayList<String> values = new ArrayList<String>(generatedKeys.size());
        for (String key : generatedKeys) {
            String val = client.get(key);
            if (val != null) {
                // parseLong avoids the needless boxing of Long.valueOf
                result += Long.parseLong(val);
                values.add(val);
            }
        }
        logger.debug("result=>{}, key: {}, values: {}", result, key, values);
        return result;
    }

    public String getKey() {
        return key;
    }

    public List<String> getGeneratedKeys() {
        return Collections.unmodifiableList(generatedKeys);
    }

    /**
     * Probes candidate key names ("key_1", "key_2", ...) until one hashes to
     * each token of the cluster, yielding one shard key per token.
     */
    List<String> generateKeys() {
        final TopologyView view = client.getTopologyView();
        final Map<String, List<TokenPoolTopology.TokenStatus>> topology = view.getTopologySnapshot();
        if (topology.keySet().isEmpty()) {
            throw new RuntimeException("Unable to determine dynomite topology");
        }
        // Retrieve the tokens for the cluster; any single rack covers the ring
        final List<String> racks = new ArrayList<String>(topology.keySet());
        final Set<Long> tokens = new HashSet<Long>();
        for (TokenPoolTopology.TokenStatus status : topology.get(racks.get(0))) {
            tokens.add(status.getToken());
        }
        final List<String> generatedKeys = new ArrayList<String>(tokens.size());
        // Find a key corresponding to each token
        int i = 0;
        while (tokens.size() > 0 && i++ < MAX_ITERATIONS) {
            Long token = view.getTokenForKey(key + "_" + i);
            if (tokens.contains(token) && tokens.remove(token)) {
                String generated = key + "_" + i;
                logger.debug("Found key=>{} for token=>{}", generated, token);
                generatedKeys.add(generated);
            }
        }
        if (!tokens.isEmpty()) {
            // Previously this condition was silent; the counter would simply run
            // with fewer shards than cluster tokens. Surface it to operators.
            logger.warn("Could not find a key for {} token(s) after {} attempts; counter will use {} shard(s)",
                    tokens.size(), MAX_ITERATIONS, generatedKeys.size());
        }
        return generatedKeys;
    }

    /** Picks a pseudo-random shard index in [0, generatedKeys.size()). */
    int randomIntFrom0toN() {
        // XORShift instead of Math.random http://javamex.com/tutorials/random_numbers/xorshift.shtml
        long x = System.nanoTime();
        x ^= (x << 21);
        x ^= (x >>> 35);
        x ^= (x << 4);
        return Math.abs((int) x % generatedKeys.size());
    }

    @Override
    public void close() throws Exception {
        // nothing to do for this implementation
    }
}
| 6,105 |
0 | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes | Create_ds/dyno/dyno-recipes/src/main/java/com/netflix/dyno/recipes/counter/DynoJedisPipelineCounter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.recipes.counter;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.concurrent.ThreadSafe;
import org.slf4j.LoggerFactory;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.jedis.DynoJedisPipeline;
import com.netflix.dyno.recipes.util.Tuple;
/**
 * Pipeline implementation of {@link DynoCounter}. This implementation has slightly different semantics than
 * {@link DynoJedisPipeline} in that both {@link #incr()} and {@link #sync()} are asynchronous.
 * <p>
 * Note that this implementation is thread-safe whereas {@link DynoJedisPipeline} is not.
 * </p>
 * <p>
 * NOTE(review): {@code incrBy(long)} is not overridden here; it is inherited from
 * {@link DynoJedisCounter} and therefore bypasses the pipeline entirely.
 * </p>
 *
 * @author jcacciatore
 * @see <a href="http://redis.io/topics/pipelining">Redis Pipelining</a>
 */
@ThreadSafe
public class DynoJedisPipelineCounter extends DynoJedisCounter {
    // Commands handed from producer threads to the single Consumer thread.
    private enum Command {
        INCR,  // buffer one increment into a pipeline
        SYNC,  // flush all pipelines to the servers
        STOP   // shut down the consumer
    }
    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(DynoJedisPipelineCounter.class);
    // Unbounded hand-off queue between callers and the consumer thread.
    private final LinkedBlockingQueue<Command> queue = new LinkedBlockingQueue<Command>();
    // Single-threaded executor: all pipeline access is confined to one thread,
    // which is what makes this class safe even though DynoJedisPipeline is not
    // thread-safe.
    private final ExecutorService counterThreadPool = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "DynoJedisPipelineCounter-Poller");
        }
    });
    private final AtomicBoolean initialized = new AtomicBoolean(false);
    // Counted down by the consumer after it processes STOP; close() waits on it.
    private final CountDownLatch latch = new CountDownLatch(1);
    private final Consumer consumer;
    public DynoJedisPipelineCounter(String key, DynoJedisClient client) {
        super(key, client);
        this.consumer = new Consumer(queue, generatedKeys);
    }
    /**
     * Starts the consumer thread. Idempotent: only the first call has effect.
     */
    @Override
    public void initialize() {
        if (initialized.compareAndSet(false, true)) {
            super.initialize();
            counterThreadPool.submit(consumer);
        }
    }
    /**
     * Asynchronously enqueues a single increment; it is not visible on the
     * server until a subsequent {@link #sync()} is processed.
     *
     * @throws IllegalStateException if {@link #initialize()} has not been called
     */
    @Override
    public void incr() {
        if (!initialized.get()) {
            throw new IllegalStateException("Counter has not been initialized");
        }
        queue.offer(Command.INCR);
    }
    /**
     * Asynchronously requests a flush of all buffered increments.
     *
     * @throws IllegalStateException if {@link #initialize()} has not been called
     */
    public void sync() {
        if (!initialized.get()) {
            throw new IllegalStateException("Counter has not been initialized");
        }
        logger.debug("sending SYNC offer");
        queue.offer(Command.SYNC);
    }
    /**
     * Signals the consumer to stop and waits up to 2 seconds for it to finish.
     * NOTE(review): buffered INCRs not followed by a SYNC are discarded on
     * close (the STOP handler does not flush); callers should sync() first.
     */
    @Override
    public void close() {
        if (!initialized.get()) {
            throw new IllegalStateException("Counter has not been initialized");
        }
        queue.offer(Command.STOP);
        try {
            latch.await(2000L, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
    }
    /**
     * Single-threaded consumer that owns all pipelines and drains the command
     * queue until a STOP command is seen.
     */
    class Consumer implements Runnable {
        /**
         * Used as a synchronizer between producer threads and this Consumer.
         */
        private final LinkedBlockingQueue<Command> queue;
        /**
         * Keys that were generated by an instance of a {@link DynoCounter}.
         */
        private final List<String> keys;
        /**
         * Used for debugging
         */
        private Long syncCount = 0L;
        /**
         * Used to ensure there are operations to sync in the pipeline. This is not
         * an optimization; the pipeline can block if multiple SYNCs are processed
         */
        private int pipelineOps = 0;
        /**
         * Contains a mapping of sharded-key to pipeline.
         */
        private List<Tuple<String, DynoJedisPipeline>> keysAndPipelines;
        public Consumer(final LinkedBlockingQueue<Command> queue, final List<String> keys) {
            this.queue = queue;
            this.keys = keys;
            keysAndPipelines = new ArrayList<Tuple<String, DynoJedisPipeline>>(keys.size());
            for (String key : keys) {
                keysAndPipelines.add(new Tuple<String, DynoJedisPipeline>(key, client.pipelined()));
            }
        }
        @Override
        public void run() {
            Command cmd = null;
            do {
                try {
                    // Blocks until a command is available.
                    cmd = queue.take();
                    switch (cmd) {
                        case INCR: {
                            // Buffer an increment on a randomly chosen shard's pipeline.
                            Tuple<String, DynoJedisPipeline> tuple = keysAndPipelines.get(randomIntFrom0toN());
                            tuple._2().incr(tuple._1());
                            pipelineOps++;
                            break;
                        }
                        case SYNC: {
                            syncCount++;
                            logger.debug(Thread.currentThread().getName() + " - SYNC " + syncCount + " received");
                            if (pipelineOps > 0) {
                                for (Tuple<String, DynoJedisPipeline> tuple : keysAndPipelines) {
                                    tuple._2().sync();
                                }
                                // Pipelines are rebuilt after each sync — presumably
                                // DynoJedisPipeline instances are single-use once
                                // synced; confirm against DynoJedisPipeline docs.
                                keysAndPipelines = new ArrayList<Tuple<String, DynoJedisPipeline>>(keys.size());
                                for (String key : keys) {
                                    keysAndPipelines.add(new Tuple<String, DynoJedisPipeline>(key, client.pipelined()));
                                }
                                pipelineOps = 0;
                            }
                            logger.debug(Thread.currentThread().getName() + " - SYNC " + syncCount + " done");
                            break;
                        }
                        case STOP: {
                            counterThreadPool.shutdownNow();
                            latch.countDown();
                            break;
                        }
                    }
                } catch (InterruptedException e) {
                    // ignore
                    // NOTE(review): the interrupt is swallowed and the loop keeps
                    // running (cmd may still be null/stale); consider re-interrupting
                    // or treating interruption as STOP.
                }
            } while (cmd != Command.STOP);
        }
    }
}
| 6,106 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/CommandTest.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import static org.mockito.Mockito.when;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.OperationMonitor;
import com.netflix.dyno.connectionpool.impl.LastOperationMonitor;
import redis.clients.jedis.Jedis;
/**
 * Tests generic commands.
 *
 * Note - The underlying jedis client has been mocked to echo back the value
 * given for SET operations
 */
public class CommandTest {

    public static final String KEY_1KB = "keyFor1KBValue";
    public static final String VALUE_1KB = generateValue(1000);

    private DynoJedisClient client;
    private ConnectionPool<Jedis> connectionPool;
    private OperationMonitor opMonitor;

    @Mock
    DynoJedisPipelineMonitor pipelineMonitor;

    @Mock
    ConnectionPoolMonitor cpMonitor;

    @Mock
    ConnectionPoolConfiguration config;

    @Before
    public void before() {
        MockitoAnnotations.initMocks(this);
        opMonitor = new LastOperationMonitor();
        connectionPool = new UnitTestConnectionPool(config, opMonitor);
        client = new DynoJedisClient.TestBuilder()
                .withAppname("CommandTest")
                .withConnectionPool(connectionPool)
                .build();
    }

    @Test
    public void testDynoJedis_GetSet() {
        // SET echoes "OK"; the 1KB value stays below the compression threshold.
        String resultSet = client.set("keyFor1KBValue", VALUE_1KB);
        Assert.assertEquals("OK", resultSet); // value should not be compressed

        // GET returns the stored value unchanged.
        String resultGet = client.get("keyFor1KBValue");
        Assert.assertEquals(VALUE_1KB, resultGet);

        // DEL reports exactly one removed key.
        Long resultDel = client.del("keyFor1KBValue");
        Assert.assertEquals((long) 1, (long) resultDel);
    }

    // Builds a test value of msgSize 'a' characters.
    private static String generateValue(int msgSize) {
        final StringBuilder builder = new StringBuilder(msgSize);
        int remaining = msgSize;
        while (remaining-- > 0) {
            builder.append('a');
        }
        return builder.toString();
    }
}
| 6,107 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/UnitTestTokenMapAndHostSupplierImpl.java | package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import org.apache.commons.lang3.tuple.Pair;
import redis.embedded.RedisServer;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.*;
/**
 * Test fixture that starts embedded Redis servers on free local ports and
 * exposes them as both a {@link HostSupplier} and a {@link TokenMapSupplier}
 * with tokens spaced evenly across the integer token ring.
 */
public class UnitTestTokenMapAndHostSupplierImpl implements TokenMapSupplier, HostSupplier {

    private final Map<Host, HostToken> hostTokenMap = new HashMap<>();
    // Each entry pairs a running embedded server with the port it is bound to.
    private final List<Pair<RedisServer, Integer>> redisServers = new ArrayList<>();

    /**
     * Starts {@code serverCount} embedded Redis servers in the given rack.
     *
     * @param serverCount number of embedded servers to start
     * @param rack        rack name assigned to every host
     * @throws IOException if an embedded server fails to start
     */
    public UnitTestTokenMapAndHostSupplierImpl(int serverCount, String rack) throws IOException {
        int hostTokenStride = Integer.MAX_VALUE / serverCount;
        for (int i = 0; i < serverCount; i++) {
            int port = findFreePort();
            RedisServer redisServer = new RedisServer(port);
            redisServer.start();
            redisServers.add(Pair.of(redisServer, port));
            Host host = new HostBuilder().setHostname("localhost").setPort(port).setRack(rack).setStatus(Host.Status.Up).createHost();
            hostTokenMap.put(host, new HostToken((long) i * hostTokenStride, host));
        }
    }

    /**
     * Finds a free TCP port by binding an ephemeral server socket.
     * Fix: use try-with-resources so the probe socket is always closed, even
     * when an exception occurs between bind and close.
     */
    private int findFreePort() {
        while (true) {
            try (ServerSocket socket = new ServerSocket(0)) {
                return socket.getLocalPort();
            } catch (IOException e) {
                // Probe failed; retry with a fresh ephemeral socket.
            }
        }
    }

    @Override
    public List<HostToken> getTokens(Set<Host> activeHosts) {
        return new ArrayList<>(hostTokenMap.values());
    }

    @Override
    public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
        return hostTokenMap.get(host);
    }

    @Override
    public List<Host> getHosts() {
        return new ArrayList<>(hostTokenMap.keySet());
    }

    /** Stops every embedded server. */
    public void shutdown() {
        redisServers.forEach(x -> x.getLeft().stop());
    }

    /** Stops the server at the given index (simulates a node outage). */
    public void pauseServer(int idx) {
        redisServers.get(idx).getLeft().stop();
    }

    /** Restarts the server at the given index. */
    public void resumeServer(int idx) {
        redisServers.get(idx).getLeft().start();
    }
}
| 6,108 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/CompressionTest.java | /*******************************************************************************
* Copyright 2015 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.impl.LastOperationMonitor;
import com.netflix.dyno.connectionpool.impl.utils.ZipUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import redis.clients.jedis.Jedis;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.when;
/**
 * Tests compression commands.
 * <p>
 * Note - The underlying jedis client has been mocked to echo back the value given for SET operations and to
 * ensure values over a 2KB threshold are compressed for HMSET operations.
 */
public class CompressionTest {
    private DynoJedisClient client;
    private ConnectionPool<Jedis> connectionPool;
    private OperationMonitor opMonitor;
    @Mock
    DynoJedisPipelineMonitor pipelineMonitor;
    @Mock
    ConnectionPoolMonitor cpMonitor;
    @Mock
    ConnectionPoolConfiguration config;
    @Before
    public void before() {
        MockitoAnnotations.initMocks(this);
        // Compression kicks in for values larger than 2KB.
        when(config.getValueCompressionThreshold()).thenReturn(2 * 1024);
        opMonitor = new LastOperationMonitor();
        connectionPool = new UnitTestConnectionPoolForCompression(config, opMonitor);
        client = new DynoJedisClient.TestBuilder()
                .withAppname("CompressionTest")
                .withConnectionPool(connectionPool)
                .build();
    }
    // Values under the threshold pass through uncompressed (mock echoes them back).
    @Test
    public void testDynoJedis_Set_UnderCompressionThreshold() {
        String result = client.set("keyFor1KBValue", VALUE_1KB);
        Assert.assertEquals(VALUE_1KB, result); // value should not be compressed
    }
    // Values over the threshold are compressed before being sent.
    @Test
    public void testDynoJedis_Set_AboveCompressionThreshold() throws IOException {
        String result = client.set("keyFor3KBValue", VALUE_3KB);
        Assert.assertTrue(result.length() < 3072);
        Assert.assertTrue(ZipUtils.isCompressed(result));
    }
    @Test
    public void testDynoJedis_Get_UnderCompressionThreshold() {
        client.set(KEY_1KB, VALUE_1KB);
        String result = client.get(KEY_1KB);
        Assert.assertEquals(VALUE_1KB, result);
    }
    // GET must transparently decompress values that were compressed on SET.
    @Test
    public void testDynoJedis_Get_AboveCompressionThreshold() throws IOException {
        client.set(KEY_3KB, VALUE_3KB);
        String result = client.get(KEY_3KB);
        Assert.assertTrue(!ZipUtils.isCompressed(result));
        Assert.assertEquals(VALUE_3KB, result);
    }
    // MGET over a mix of compressed, uncompressed, and missing keys: results
    // come back in request order with null for the missing key.
    @Test
    public void testDynoJedis_Mget() throws IOException {
        client.set(KEY_1KB, VALUE_1KB);
        client.set(KEY_2KB, VALUE_2KB);
        client.set(KEY_3KB, VALUE_3KB);
        // Expect one key as missing in datastore
        //client.set(KEY_4KB, VALUE_4KB);
        client.set(KEY_5KB, VALUE_5KB);
        String[] keys = {KEY_1KB, KEY_2KB, KEY_3KB, KEY_4KB, KEY_5KB};
        // expected value list
        String[] values = {VALUE_1KB, VALUE_2KB, VALUE_3KB, null, VALUE_5KB};
        List<String> result = client.mget(keys);
        Assert.assertEquals(result.size(), keys.length);
        for (int i = 0; i < keys.length; i++) {
            String value = result.get(i);
            Assert.assertEquals(value, values[i]);
        }
    }
    // Verifies via the operation monitor that HMSET with a >2KB field succeeded
    // (the mock connection pool enforces compression for HMSET).
    @Test
    public void testDynoJedis_Hmset_AboveCompressionThreshold() throws IOException {
        final Map<String, String> map = new HashMap<String, String>();
        map.put(KEY_1KB, VALUE_1KB);
        map.put(KEY_3KB, VALUE_3KB);
        client.d_hmset("compressionTestKey", map);
        LastOperationMonitor monitor = (LastOperationMonitor) opMonitor;
        Assert.assertTrue(1 == monitor.getSuccessCount(OpName.HMSET.name(), true));
    }
    // Round-trips raw (non-Base64) bytes through compress/decompress.
    @Test
    public void testZipUtilsDecompressBytesNonBase64() throws Exception {
        String s = "ABCDEFG__abcdefg__1234567890'\"\\+=-::ABCDEFG__abcdefg__1234567890'\"\\+=-::ABCDEFG__abcdefg__1234567890'\"\\+=-";
        byte[] val = s.getBytes();
        byte[] compressed = ZipUtils.compressBytesNonBase64(val);
        Assert.assertTrue(compressed.length < val.length);
        byte[] decompressed = ZipUtils.decompressBytesNonBase64(compressed);
        Assert.assertEquals(s, new String(decompressed));
    }
    // @Test
    // public void testDynoJedisPipeline_Binary_HGETALL() throws Exception {
    // Map<byte[], byte[]>
    //
    // ConnectionPoolImpl cp = mock(ConnectionPoolImpl.class);
    //
    // DynoJedisPipeline pipeline = new
    // DynoJedisPipeline(cp, pipelineMonitor, cpMonitor);
    //
    // //pipeline.hgetAll();
    //
    // }
    public static final String KEY_1KB = "keyFor1KBValue";
    public static final String KEY_2KB = "keyFor2KBValue";
    public static final String KEY_3KB = "keyFor3KBValue";
    public static final String KEY_4KB = "keyFor4KBValue";
    public static final String KEY_5KB = "keyFor5KBValue";
    public static final String VALUE_1KB = generateValue(1);
    public static final String VALUE_2KB = generateValue(2);
    public static final String VALUE_3KB = generateValue(3);
    public static final String VALUE_4KB = generateValue(4);
    public static final String VALUE_5KB = generateValue(5);
    // Builds roughly `kilobytes` KB of text: each unit appends 10 * (51 + 51)
    // = 1020 characters.
    private static String generateValue(int kilobytes) {
        StringBuilder sb = new StringBuilder(kilobytes * 512); // estimating 2 bytes per char
        for (int i = 0; i < kilobytes; i++) {
            for (int j = 0; j < 10; j++) {
                sb.append("abcdefghijklmnopqrstuvwxzy0123456789a1b2c3d4f5g6h7"); // 50 characters (~100 bytes)
                sb.append(":");
                sb.append("abcdefghijklmnopqrstuvwxzy0123456789a1b2c3d4f5g6h7");
                sb.append(":");
            }
        }
        return sb.toString();
    }
}
| 6,109 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/JedisConnectionFactoryIntegrationTest.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.jedis.utils.MockedRedisResponse;
import org.junit.Assert;
import org.junit.Test;
import javax.net.ssl.SSLContext;
import java.util.*;
import static com.netflix.dyno.jedis.utils.SSLContextUtil.createAndInitSSLContext;
/**
* This tests checks if we can use Jedis with/without SSL/TLS against real TCP server, and because of Redis
* itself doesn't support SSL, we are using "dummy" Redis that is able to answer our "GET key" command.
*/
public class JedisConnectionFactoryIntegrationTest {

    private final int port = 8998;
    private final String rack = "rack1";
    private final String datacenter = "rack";

    private final Host localHost = new HostBuilder().setHostname("localhost").setPort(port)
            .setRack(rack).setStatus(Host.Status.Up).createHost();

    /** Supplies only the single local mock host. */
    private final HostSupplier localHostSupplier = new HostSupplier() {
        @Override
        public List<Host> getHosts() {
            return Collections.singletonList(localHost);
        }
    };

    /** Maps everything to one fixed token on the single local host. */
    private final TokenMapSupplier supplier = new TokenMapSupplier() {
        final HostToken localHostToken = new HostToken(100000L, localHost);

        @Override
        public List<HostToken> getTokens(Set<Host> activeHosts) {
            return Collections.singletonList(localHostToken);
        }

        @Override
        public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
            return localHostToken;
        }
    };

    @Test
    public void testSSLJedisClient() throws Exception {
        verifyGetRoundTrip(true);
    }

    @Test
    public void testWithNoSSLJedisClient() throws Exception {
        verifyGetRoundTrip(false);
    }

    /**
     * Starts the mocked Redis server (optionally with SSL), performs a single GET through a
     * freshly built Dyno client and checks that the mocked value comes back.
     *
     * @param withSsl whether both the mock server and the client use SSL/TLS
     */
    private void verifyGetRoundTrip(final boolean withSsl) throws Exception {
        // given
        final String expectedValue = String.valueOf(new Random().nextInt());
        final MockedRedisResponse mockedRedisResponse = new MockedRedisResponse(expectedValue, withSsl);

        // when
        mockedRedisResponse.start();
        try {
            final DynoJedisClient dynoClient = constructJedisClient(withSsl);
            try {
                // key doesn't matter here, we just want to test the tcp connection
                final String value = dynoClient.get("keyNameDoestMatter");
                // then — JUnit order is (expected, actual); the original had them swapped,
                // which produces misleading failure messages.
                Assert.assertEquals(expectedValue, value);
            } finally {
                dynoClient.stopClient();
            }
        } finally {
            // Stop the mock server even on failure so later tests can rebind the port.
            mockedRedisResponse.stop();
        }
    }

    /**
     * Builds a DynoJedisClient pointed at the single local host, optionally configured with an
     * SSL socket factory initialized from the test keystore ("client.jks").
     */
    private DynoJedisClient constructJedisClient(final boolean withSsl) throws Exception {
        final ConnectionPoolConfigurationImpl connectionPoolConfiguration = new ConnectionPoolConfigurationImpl(rack);
        connectionPoolConfiguration.withTokenSupplier(supplier);
        connectionPoolConfiguration.setLocalRack(rack);
        connectionPoolConfiguration.setLocalDataCenter(datacenter);

        final SSLContext sslContext = createAndInitSSLContext("client.jks");

        final DynoJedisClient.Builder builder = new DynoJedisClient.Builder()
                .withApplicationName("appname")
                .withDynomiteClusterName(rack)
                .withHostSupplier(localHostSupplier)
                .withCPConfig(connectionPoolConfiguration);

        if (withSsl) {
            return builder
                    .withSSLSocketFactory(sslContext.getSocketFactory())
                    .build();
        } else {
            return builder.build();
        }
    }
}
// ==== File: dyno-jedis/src/test/java/com/netflix/dyno/jedis/UnitTestConnectionPoolForCompression.java ====
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.impl.ConnectionContextImpl;
import com.netflix.dyno.connectionpool.impl.OperationResultImpl;
import com.netflix.dyno.connectionpool.impl.utils.ZipUtils;
import com.netflix.dyno.connectionpool.TokenRackMapper;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import redis.clients.jedis.Jedis;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.when;
/**
 * {@link ConnectionPool} stub backed by a Mockito-mocked {@link Jedis} client, used by the
 * compression unit tests (see CompressionTest). Keys and values are held in a local map; the
 * stubbed answers emulate just enough Redis behavior to exercise the client-side
 * compression/decompression code paths.
 */
public class UnitTestConnectionPoolForCompression implements ConnectionPool<Jedis> {

    // Emulated Redis keyspace backing the SET/MGET stubs.
    Map<String, String> redis_data;

    @Mock
    Jedis client;

    @Mock
    Connection<Jedis> connection;

    private final ConnectionPoolConfiguration config;
    // Per-operation context; carries the "compression"/"decompression" metadata flags
    // that the client sets while executing an operation.
    private final ConnectionContextImpl context = new ConnectionContextImpl();
    private final OperationMonitor opMonitor;

    /**
     * Wires up all Mockito stubs on the mocked Jedis client.
     *
     * @param config    pool configuration returned verbatim by {@link #getConfiguration()}
     * @param opMonitor monitor that records operation successes (compression-aware)
     */
    public UnitTestConnectionPoolForCompression(ConnectionPoolConfiguration config, OperationMonitor opMonitor) {
        MockitoAnnotations.initMocks(this);
        this.config = config;
        this.opMonitor = opMonitor;
        this.redis_data = new HashMap<String, String>();

        // SET stores the pair and echoes the written value back.
        // NOTE(review): real Redis returns "OK" here; the echo is presumably load-bearing so
        // CompressionTest can inspect what was actually written — confirm before changing.
        when(client.set(anyString(), anyString())).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                String key = (String) invocation.getArguments()[0];
                String value = (String) invocation.getArguments()[1];
                redis_data.put(key, value);
                return value;
            }
        });

        when(client.get(CompressionTest.VALUE_1KB)).thenReturn(CompressionTest.VALUE_1KB);

        // GET of the 3KB key returns a compressed, base64-encoded payload, forcing the
        // client under test to take its decompression path.
        when(client.get(CompressionTest.KEY_3KB)).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                return ZipUtils.compressStringToBase64String(CompressionTest.VALUE_3KB);
            }
        });

        // The 1KB value comes back uncompressed.
        when(client.get(CompressionTest.KEY_1KB)).thenReturn(CompressionTest.VALUE_1KB);

        // HMSET asserts that a 3KB field value arrives compressed; otherwise it fails the test
        // by throwing from inside the stub.
        when(client.hmset(anyString(), anyMap())).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                Map<String, String> map = (Map<String, String>) invocation.getArguments()[1];
                if (map != null) {
                    if (map.containsKey(CompressionTest.KEY_3KB)) {
                        if (ZipUtils.isCompressed(map.get(CompressionTest.KEY_3KB))) {
                            return "OK";
                        } else {
                            throw new RuntimeException("Value was not compressed");
                        }
                    }
                } else {
                    throw new RuntimeException("Map is NULL");
                }
                return "OK";
            }
        });

        // MGET resolves each key against the in-memory map; missing keys yield null (Redis nil).
        when(client.mget(Matchers.<String>anyVararg())).thenAnswer(new Answer<List<String>>() {
            @Override
            public List<String> answer(InvocationOnMock invocation) throws Throwable {
                // Get the keys passed
                Object[] keys = invocation.getArguments();
                List<String> values = new ArrayList<String>(10);
                for (int i = 0; i < keys.length; i++) {
                    // get the ith key, find the value in redis_data
                    // if found, return that else return nil
                    String key = (String) keys[i];
                    String value = redis_data.get(key);
                    values.add(i, value);
                }
                return values;
            }
        });
    }

    // --- Host management: inert stubs; host topology is irrelevant to these tests. ---

    @Override
    public boolean addHost(Host host) {
        return true;
    }

    @Override
    public boolean removeHost(Host host) {
        return true;
    }

    @Override
    public boolean isHostUp(Host host) {
        return false;
    }

    @Override
    public boolean hasHost(Host host) {
        return false;
    }

    @Override
    public List<HostConnectionPool<Jedis>> getActivePools() {
        return null;
    }

    @Override
    public List<HostConnectionPool<Jedis>> getPools() {
        return null;
    }

    @Override
    public HostConnectionPool<Jedis> getHostPool(Host host) {
        return null;
    }

    @Override
    public <R> Collection<OperationResult<R>> executeWithRing(TokenRackMapper tokenRackMapper, Operation<Jedis, R> op) throws DynoException {
        return null;
    }

    @Override
    public <R> ListenableFuture<OperationResult<R>> executeAsync(AsyncOperation<Jedis, R> op) throws DynoException {
        return null;
    }

    /**
     * Runs the operation directly against the mocked client. Records a compression-aware
     * success when the operation flagged compression or decompression in the context.
     */
    @Override
    public <R> OperationResult<R> executeWithFailover(Operation<Jedis, R> op) throws DynoException {
        try {
            R r = op.execute(client, context);
            if (context.hasMetadata("compression") || context.hasMetadata("decompression")) {
                opMonitor.recordSuccess(op.getName(), true);
            } else {
                opMonitor.recordSuccess(op.getName());
            }
            return new OperationResultImpl<R>("Test", r, null);
        } finally {
            // The context is reused across operations; clear metadata for the next call.
            context.reset();
        }
    }

    @Override
    public void shutdown() {
    }

    /** Returns an already-"started" future; there is nothing to start in this stub. */
    @Override
    public Future<Boolean> start() throws DynoException {
        return new Future<Boolean>() {

            @Override
            public boolean cancel(boolean mayInterruptIfRunning) {
                return false;
            }

            @Override
            public boolean isCancelled() {
                return false;
            }

            @Override
            public boolean isDone() {
                return false;
            }

            @Override
            public Boolean get() throws InterruptedException, ExecutionException {
                return true;
            }

            @Override
            public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
                return true;
            }
        };
    }

    @Override
    public void idle() {
    }

    @Override
    public ConnectionPoolConfiguration getConfiguration() {
        return config;
    }

    @Override
    public HealthTracker<Jedis> getHealthTracker() {
        return null;
    }

    @Override
    public boolean isIdle() {
        return false;
    }

    @Override
    public Future<Boolean> updateHosts(Collection activeHosts, Collection inactiveHosts) {
        return null;
    }
}
// ==== File: dyno-jedis/src/test/java/com/netflix/dyno/jedis/RedisAuthenticationIntegrationTest.java ====
package com.netflix.dyno.jedis;
import com.google.common.base.Throwables;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.Host.Status;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.CountingConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.impl.HostConnectionPoolImpl;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.contrib.DynoOPMonitor;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.exceptions.JedisDataException;
import redis.embedded.RedisServer;
import redis.embedded.RedisServerBuilder;
import java.net.ConnectException;
import java.util.Collections;
import java.util.List;
import java.util.Set;
/**
 * Integration tests for Redis AUTH handling, run against an embedded Redis server with and
 * without a {@code requirepass} setting.
 */
public class RedisAuthenticationIntegrationTest {

    private static final int REDIS_PORT = 8998;
    private static final String REDIS_RACK = "rack-1c";
    private static final String REDIS_DATACENTER = "rack-1";

    private RedisServer redisServer;

    @Before
    public void setUp() throws Exception {
        // skip tests on windows due to https://github.com/spinnaker/embedded-redis#redis-version
        Assume.assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win"));
    }

    @After
    public void tearDown() throws Exception {
        if (redisServer != null) {
            redisServer.stop();
        }
    }

    /** SET/GET round trip through DynoJedisClient against an unauthenticated embedded Redis. */
    @Test
    public void testDynoClient_noAuthSuccess() throws Exception {
        redisServer = new RedisServer(REDIS_PORT);
        redisServer.start();

        Host noAuthHost = hostWithoutPassword();
        TokenMapSupplierImpl tokenMapSupplier = new TokenMapSupplierImpl(noAuthHost);
        DynoJedisClient dynoClient = constructJedisClient(tokenMapSupplier,
                () -> Collections.singletonList(noAuthHost));
        try {
            String statusCodeReply = dynoClient.set("some-key", "some-value");
            Assert.assertEquals("OK", statusCodeReply);

            String value = dynoClient.get("some-key");
            Assert.assertEquals("some-value", value);
        } finally {
            // Release pooled connections even on assertion failure (the original leaked them).
            dynoClient.stopClient();
        }
    }

    /** SET/GET round trip when Redis requires a password and the Host carries it. */
    @Test
    public void testDynoClient_authSuccess() throws Exception {
        startPasswordProtectedServer();

        Host authHost = hostWithPassword("password");
        TokenMapSupplierImpl tokenMapSupplier = new TokenMapSupplierImpl(authHost);
        DynoJedisClient dynoClient = constructJedisClient(tokenMapSupplier,
                () -> Collections.singletonList(authHost));
        try {
            String statusCodeReply = dynoClient.set("some-key", "some-value");
            Assert.assertEquals("OK", statusCodeReply);

            String value = dynoClient.get("some-key");
            Assert.assertEquals("some-value", value);
        } finally {
            dynoClient.stopClient();
        }
    }

    @Test
    public void testJedisConnFactory_noAuthSuccess() throws Exception {
        redisServer = new RedisServer(REDIS_PORT);
        redisServer.start();

        createConnection(hostWithoutPassword()).execPing();
    }

    @Test
    public void testJedisConnFactory_authSuccess() throws Exception {
        startPasswordProtectedServer();

        createConnection(hostWithPassword("password")).execPing();
    }

    /** With no server running, a ping must surface a connect failure as DynoConnectException. */
    @Test
    public void testJedisConnFactory_connectionFailed() throws Exception {
        Connection<Jedis> connection = createConnection(hostWithoutPassword());
        try {
            connection.execPing();
            Assert.fail("expected to throw");
        } catch (DynoConnectException e) {
            Assert.assertTrue("root cause should be connect exception",
                    Throwables.getRootCause(e) instanceof ConnectException);
        }
    }

    /** A password-protected server must reject an unauthenticated ping. */
    @Test
    public void testJedisConnFactory_authenticationRequired() throws Exception {
        startPasswordProtectedServer();

        Connection<Jedis> connection = createConnection(hostWithoutPassword());
        try {
            connection.execPing();
            Assert.fail("expected to throw");
        } catch (JedisDataException e) {
            Assert.assertEquals("NOAUTH Authentication required.", e.getMessage());
        }
    }

    /** A wrong password must be rejected by the server. */
    @Test
    public void testJedisConnFactory_invalidPassword() throws Exception {
        startPasswordProtectedServer();

        Connection<Jedis> connection = createConnection(hostWithPassword("invalid-password"));
        try {
            connection.execPing();
            Assert.fail("expected to throw");
        } catch (JedisDataException e) {
            Assert.assertEquals("ERR invalid password", e.getMessage());
        }
    }

    // --- helpers -------------------------------------------------------------

    /** Starts the embedded Redis configured with "requirepass password". */
    private void startPasswordProtectedServer() throws Exception {
        redisServer = new RedisServerBuilder()
                .port(REDIS_PORT)
                .setting("requirepass password")
                .build();
        redisServer.start();
    }

    /** Local test host with no password configured. */
    private Host hostWithoutPassword() {
        return new HostBuilder().setHostname("localhost").setPort(REDIS_PORT)
                .setRack(REDIS_RACK).setStatus(Status.Up).createHost();
    }

    /** Local test host carrying the given AUTH password. */
    private Host hostWithPassword(String password) {
        return new HostBuilder().setHostname("localhost").setPort(REDIS_PORT)
                .setRack(REDIS_RACK).setStatus(Status.Up).setHashtag(null)
                .setPassword(password).createHost();
    }

    /**
     * Mirrors the connection pool's internal wiring to hand back a raw connection to the
     * given host; previously duplicated verbatim in four tests.
     */
    private Connection<Jedis> createConnection(Host host) {
        JedisConnectionFactory conFactory =
                new JedisConnectionFactory(new DynoOPMonitor("some-application-name"), null);
        ConnectionPoolConfiguration cpConfig = new ConnectionPoolConfigurationImpl("some-name");
        CountingConnectionPoolMonitor poolMonitor = new CountingConnectionPoolMonitor();
        HostConnectionPool<Jedis> hostConnectionPool =
                new HostConnectionPoolImpl<>(host, conFactory, cpConfig, poolMonitor);
        return conFactory.createConnection(hostConnectionPool);
    }

    /** Builds a DynoJedisClient wired to the given suppliers and the local rack/datacenter. */
    private DynoJedisClient constructJedisClient(TokenMapSupplier tokenMapSupplier,
                                                 HostSupplier hostSupplier) {
        final ConnectionPoolConfigurationImpl connectionPoolConfiguration =
                new ConnectionPoolConfigurationImpl(REDIS_RACK);
        connectionPoolConfiguration.withTokenSupplier(tokenMapSupplier);
        connectionPoolConfiguration.setLocalRack(REDIS_RACK);
        connectionPoolConfiguration.setLocalDataCenter(REDIS_DATACENTER);

        return new DynoJedisClient.Builder()
                .withApplicationName("some-application-name")
                .withDynomiteClusterName(REDIS_RACK)
                .withHostSupplier(hostSupplier)
                .withCPConfig(connectionPoolConfiguration)
                .build();
    }

    /** Token supplier that maps everything to one fixed token on the single test host. */
    private static class TokenMapSupplierImpl implements TokenMapSupplier {

        private final HostToken localHostToken;

        private TokenMapSupplierImpl(Host host) {
            this.localHostToken = new HostToken(100000L, host);
        }

        @Override
        public List<HostToken> getTokens(Set<Host> activeHosts) {
            return Collections.singletonList(localHostToken);
        }

        @Override
        public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
            return localHostToken;
        }
    }
}
// ==== File: dyno-jedis/src/test/java/com/netflix/dyno/jedis/UnitTestConnectionPool.java ====
/**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.impl.ConnectionContextImpl;
import com.netflix.dyno.connectionpool.impl.OperationResultImpl;
import com.netflix.dyno.connectionpool.TokenRackMapper;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import redis.clients.jedis.Jedis;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.when;
/**
 * {@link ConnectionPool} stub backed by a Mockito-mocked {@link Jedis} client, used by the
 * generic command unit tests (see CommandTest). SET/GET/DEL are emulated against an in-memory
 * map so tests can run without a real Redis server.
 */
public class UnitTestConnectionPool implements ConnectionPool<Jedis> {

    // Emulated Redis keyspace backing the SET/GET/DEL/MGET stubs.
    Map<String, String> redis_data;

    @Mock
    Jedis client;

    @Mock
    Connection<Jedis> connection;

    private final ConnectionPoolConfiguration config;
    // Per-operation context, reset after every executeWithFailover call.
    private final ConnectionContextImpl context = new ConnectionContextImpl();
    private final OperationMonitor opMonitor;

    /**
     * Wires up the Mockito stubs on the mocked Jedis client.
     *
     * @param config    pool configuration returned verbatim by {@link #getConfiguration()}
     * @param opMonitor monitor that records operation successes
     */
    public UnitTestConnectionPool(ConnectionPoolConfiguration config, OperationMonitor opMonitor) {
        MockitoAnnotations.initMocks(this);
        this.config = config;
        this.opMonitor = opMonitor;
        this.redis_data = new HashMap<String, String>();

        // SET stores the pair and returns Redis's status reply "OK".
        when(client.set(anyString(), anyString())).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                String key = (String) invocation.getArguments()[0];
                String value = (String) invocation.getArguments()[1];
                redis_data.put(key, value);
                return "OK";
            }
        });

        // GET resolves the key from the in-memory map; missing keys yield null (Redis nil).
        when(client.get(anyString())).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                String key = (String) invocation.getArguments()[0];
                return redis_data.get(key);
            }
        });

        // DEL removes the key and returns the number of keys deleted (1 or 0), like Redis.
        when(client.del(anyString())).thenAnswer(new Answer<Long>() {
            @Override
            public Long answer(InvocationOnMock invocation) throws Throwable {
                String key = (String) invocation.getArguments()[0];
                if (redis_data.remove(key) != null) {
                    return (long) 1;
                }
                return (long) 0;
            }
        });

        // HMSET accepts any non-null map (and checks for CommandTest.KEY_1KB); a null map
        // fails the test by throwing from inside the stub.
        when(client.hmset(anyString(), anyMap())).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                Map<String, String> map = (Map<String, String>) invocation.getArguments()[1];
                if (map != null) {
                    if (map.containsKey(CommandTest.KEY_1KB)) {
                        return "OK";
                    }
                } else {
                    throw new RuntimeException("Map is NULL");
                }
                return "OK";
            }
        });

        // MGET resolves each key against the in-memory map; missing keys yield null.
        when(client.mget(Matchers.<String>anyVararg())).thenAnswer(new Answer<List<String>>() {
            @Override
            public List<String> answer(InvocationOnMock invocation) throws Throwable {
                // Get the keys passed
                Object[] keys = invocation.getArguments();
                List<String> values = new ArrayList<String>(10);
                for (int i = 0; i < keys.length; i++) {
                    // get the ith key, find the value in redis_data
                    // if found, return that else return nil
                    String key = (String) keys[i];
                    String value = redis_data.get(key);
                    values.add(i, value);
                }
                return values;
            }
        });
    }

    // --- Host management: inert stubs; host topology is irrelevant to these tests. ---

    @Override
    public boolean addHost(Host host) {
        return true;
    }

    @Override
    public boolean removeHost(Host host) {
        return true;
    }

    @Override
    public boolean isHostUp(Host host) {
        return false;
    }

    @Override
    public boolean hasHost(Host host) {
        return false;
    }

    @Override
    public List<HostConnectionPool<Jedis>> getActivePools() {
        return null;
    }

    @Override
    public List<HostConnectionPool<Jedis>> getPools() {
        return null;
    }

    @Override
    public HostConnectionPool<Jedis> getHostPool(Host host) {
        return null;
    }

    @Override
    public <R> Collection<OperationResult<R>> executeWithRing(TokenRackMapper tokenRackMapper, Operation<Jedis, R> op) throws DynoException {
        return null;
    }

    @Override
    public <R> ListenableFuture<OperationResult<R>> executeAsync(AsyncOperation<Jedis, R> op) throws DynoException {
        return null;
    }

    /** Runs the operation directly against the mocked client and records the success. */
    @Override
    public <R> OperationResult<R> executeWithFailover(Operation<Jedis, R> op) throws DynoException {
        try {
            R r = op.execute(client, context);
            opMonitor.recordSuccess(op.getName());
            return new OperationResultImpl<R>("Test", r, null);
        } finally {
            // The context is reused across operations; clear it for the next call.
            context.reset();
        }
    }

    @Override
    public void shutdown() {
    }

    /** Returns an already-"started" future; there is nothing to start in this stub. */
    @Override
    public Future<Boolean> start() throws DynoException {
        return new Future<Boolean>() {

            @Override
            public boolean cancel(boolean mayInterruptIfRunning) {
                return false;
            }

            @Override
            public boolean isCancelled() {
                return false;
            }

            @Override
            public boolean isDone() {
                return false;
            }

            @Override
            public Boolean get() throws InterruptedException, ExecutionException {
                return true;
            }

            @Override
            public Boolean get(long timeout, TimeUnit unit)
                    throws InterruptedException, ExecutionException, TimeoutException {
                return true;
            }
        };
    }

    @Override
    public void idle() {
    }

    @Override
    public ConnectionPoolConfiguration getConfiguration() {
        return config;
    }

    @Override
    public HealthTracker<Jedis> getHealthTracker() {
        return null;
    }

    @Override
    public boolean isIdle() {
        return false;
    }

    @Override
    public Future<Boolean> updateHosts(Collection activeHosts, Collection inactiveHosts) {
        return null;
    }
}
// ==== File: dyno-jedis/src/test/java/com/netflix/dyno/jedis/ExpireHashTest.java ====
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.MockitoAnnotations;
import redis.clients.jedis.ScanResult;
import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;
/**
* Tests generic commands.
* <p>
* Note - The underlying jedis client has been mocked to echo back the value
* given for SET operations
*/
public class ExpireHashTest {

    private static final String REDIS_RACK = "rack-1c";
    private static final String REDIS_DATACENTER = "rack-1";

    private DynoJedisClient client;
    private UnitTestTokenMapAndHostSupplierImpl tokenMapAndHostSupplier;

    /** Builds a DynoJedisClient against the unit-test token map/host supplier (3 hosts). */
    @Before
    public void before() throws IOException {
        MockitoAnnotations.initMocks(this);
        tokenMapAndHostSupplier = new UnitTestTokenMapAndHostSupplierImpl(3, REDIS_RACK);
        final ConnectionPoolConfigurationImpl connectionPoolConfiguration =
                new ConnectionPoolConfigurationImpl(REDIS_RACK);
        connectionPoolConfiguration.withTokenSupplier(tokenMapAndHostSupplier);
        connectionPoolConfiguration.withHashtag("{}");
        connectionPoolConfiguration.setLocalRack(REDIS_RACK);
        connectionPoolConfiguration.setLocalDataCenter(REDIS_DATACENTER);

        client = new DynoJedisClient.Builder()
                .withApplicationName("CommandTest")
                .withDynomiteClusterName(REDIS_RACK)
                .withHostSupplier(tokenMapAndHostSupplier)
                .withCPConfig(connectionPoolConfiguration)
                .build();
    }

    @After
    public void after() {
        client.stopClient();
        tokenMapAndHostSupplier.shutdown();
    }

    /** Basic EH set/setnx/get/exists/del lifecycle; Long.valueOf replaces deprecated new Long(). */
    @Test
    public void testBasicCommands() {
        final String expireHashKey = "expireHashKey";

        Assert.assertEquals(Long.valueOf(1L), client.ehset(expireHashKey, "hello", "world", 900));
        Assert.assertEquals(Long.valueOf(0L), client.ehsetnx(expireHashKey, "hello", "world", 900));
        Assert.assertEquals("world", client.ehget(expireHashKey, "hello"));
        Assert.assertTrue(client.ehexists(expireHashKey, "hello"));
        Assert.assertEquals(Long.valueOf(1L), client.ehdel(expireHashKey, "hello"));
        Assert.assertNull(client.ehget(expireHashKey, "hello"));

        // verify metadata explicitly — emptySet() avoids the raw-typed EMPTY_SET constant
        Set<String> mFields = client.zrangeByScore(client.ehashMetadataKey(expireHashKey), 0, Integer.MAX_VALUE);
        Assert.assertEquals(Collections.emptySet(), mFields);
    }

    /** Verifies per-field expiry, hash-level TTL/expire/persist, and metadata cleanup. */
    @Test
    public void testSecondaryKeyTimeout() throws Exception {
        final String expireHashKey = "expireHashKey";
        long veryShortTimeout = 1;
        long shortTimeout = 3; //seconds
        long longTimeout = 100;
        long startTime = System.currentTimeMillis();

        Assert.assertEquals(Long.valueOf(1L), client.ehset(expireHashKey, "hello", "world", veryShortTimeout));
        Assert.assertEquals(Long.valueOf(1L), client.ehset(expireHashKey, "alice", "bob", shortTimeout));
        Assert.assertEquals(Long.valueOf(1L), client.ehset(expireHashKey, "foo", "bar", longTimeout));
        Assert.assertEquals("world", client.ehget(expireHashKey, "hello"));
        Assert.assertEquals("bob", client.ehget(expireHashKey, "alice"));
        Assert.assertEquals("bar", client.ehget(expireHashKey, "foo"));

        Thread.sleep(veryShortTimeout * 1000L);

        // The very-short field must be gone; the others still live.
        Assert.assertNull(client.ehget(expireHashKey, "hello"));
        Assert.assertEquals("bob", client.ehget(expireHashKey, "alice"));
        Assert.assertEquals("bar", client.ehget(expireHashKey, "foo"));

        // check timeout on the secondary key (allow one second of clock slack)
        long timeElapsed = System.currentTimeMillis() - startTime;
        long timeRemainingInSecs = longTimeout - (timeElapsed / 1000L);
        Assert.assertTrue(client.ehttl(expireHashKey, "foo") == timeRemainingInSecs ||
                client.ehttl(expireHashKey, "foo") == (timeRemainingInSecs + 1));

        // check timeout on expirehash: -1 means no TTL set yet
        Assert.assertEquals(Long.valueOf(-1L), client.ehttl(expireHashKey));
        Assert.assertEquals(Long.valueOf(1L), client.ehexpire(expireHashKey, 20));

        // check the ttl values
        Assert.assertTrue(client.ehttl(expireHashKey) == 20 ||
                client.ehttl(expireHashKey) == (20 - 1));
        Assert.assertEquals(Long.valueOf(1L), client.ehpersist(expireHashKey));
        Assert.assertEquals(Long.valueOf(-1L), client.ehttl(expireHashKey));

        // verify metadata explicitly
        Set<String> mFields = client.zrangeByScore(client.ehashMetadataKey(expireHashKey), 0, Integer.MAX_VALUE);
        Assert.assertEquals(2, mFields.size());

        Thread.sleep(shortTimeout * 1000L);
        Assert.assertNull(client.ehget(expireHashKey, "alice"));
        Assert.assertEquals("bar", client.ehget(expireHashKey, "foo"));

        // verify metadata explicitly
        mFields = client.zrangeByScore(client.ehashMetadataKey(expireHashKey), 0, Integer.MAX_VALUE);
        Assert.assertEquals(1, mFields.size());
    }

    /** Bulk EH operations (mset/getall/mget/keys/vals/len/rename) plus staggered expiry. */
    @Test
    public void testMultipleFields() throws InterruptedException {
        String expireHashKey = "expireHashKey";
        final String secondaryKeyPrefix = "secondaryKey-";
        final String valuePrefix = "value-";
        final int fieldCount = 10;
        final long minTimeout = 2; //seconds

        // field i expires at minTimeout + i seconds, giving a staggered decay.
        Map<String, Pair<String, Long>> fields = new HashMap<>();
        for (int i = 0; i < fieldCount; i++) {
            fields.put(secondaryKeyPrefix + i, new ImmutablePair<>(valuePrefix + i, i + minTimeout));
        }
        Assert.assertEquals("OK", client.ehmset(expireHashKey, fields));
        long startTime = System.currentTimeMillis();

        Map<String, String> allFields = client.ehgetall(expireHashKey);
        List<String> mgetFields = client.ehmget(expireHashKey, fields.keySet().toArray(new String[0]));
        Assert.assertEquals(fieldCount, allFields.size());
        Assert.assertTrue(allFields.values().containsAll(mgetFields));
        Assert.assertEquals(fields.size(), client.ehlen(expireHashKey).longValue());

        Set<String> allKeys = client.ehkeys(expireHashKey);
        List<String> allVals = client.ehvals(expireHashKey);
        Assert.assertEquals(fieldCount, allKeys.size());
        Assert.assertEquals(fieldCount, allVals.size());
        Assert.assertTrue(allKeys.containsAll(fields.keySet()));
        Assert.assertTrue(allVals.containsAll(fields.values().stream().map(Pair::getLeft).collect(Collectors.toSet())));

        // renamenx onto itself must fail (0); a plain rename to a new name must succeed.
        Assert.assertEquals(Long.valueOf(0L), client.ehrenamenx(expireHashKey, expireHashKey));
        Assert.assertEquals("OK", client.ehrename(expireHashKey, expireHashKey + "_new"));
        expireHashKey = expireHashKey + "_new";

        Thread.sleep((minTimeout + 2) * 1000L);

        // Roughly one field expires per elapsed second; allow one field of slack.
        long timeElapsed = System.currentTimeMillis() - startTime;
        long remainingCount = fieldCount - (long) Math.floor((double) timeElapsed / 1000L);
        Map<String, String> remainingFields = client.ehgetall(expireHashKey);
        Set<String> mFields = client.zrangeByScore(client.ehashMetadataKey(expireHashKey), 0, Integer.MAX_VALUE);
        Assert.assertTrue(remainingFields.size() == remainingCount ||
                remainingFields.size() == remainingCount + 1);
        // verify metadata explicitly
        Assert.assertTrue(mFields.size() == remainingCount ||
                mFields.size() == remainingCount + 1);
    }

    /** EHSCAN must enumerate every field exactly once across cursor pages. */
    @Test
    public void testScan() {
        String expireHashKey = "expireHashKey";
        final String secondaryKeyPrefix = "secondaryKey-";
        final String valuePrefix = "value-";
        final int fieldCount = 1000;
        final long minTimeout = 15; //seconds

        // Insert in batches of 100 to keep each ehmset payload small.
        Map<String, Pair<String, Long>> fields = new HashMap<>();
        for (int i = 1; i <= fieldCount; i++) {
            fields.put(secondaryKeyPrefix + i, new ImmutablePair<>(valuePrefix + i, i + minTimeout));
            if (i % 100 == 0) {
                Assert.assertEquals("OK", client.ehmset(expireHashKey, fields));
                fields = new HashMap<>();
            }
        }

        int count = 0;
        String cursor = "0";
        do {
            ScanResult<Map.Entry<String, String>> values = client.ehscan(expireHashKey, cursor);
            count += values.getResult().size();
            cursor = values.getCursor();
        } while (cursor.compareTo("0") != 0);

        Assert.assertEquals(fieldCount, count);
    }
}
// ==== File: dyno-jedis/src/test/java/com/netflix/dyno/jedis/ConnectionTest.java ====
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.MockitoAnnotations;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
/**
 * Verifies that the Dyno connection pool re-establishes connections after a host
 * goes down and comes back up.
 */
public class ConnectionTest {

    private static final String REDIS_RACK = "rack-1c";
    private static final String REDIS_DATACENTER = "rack-1";

    private DynoJedisClient client;
    private UnitTestTokenMapAndHostSupplierImpl tokenMapAndHostSupplier;

    /** Delay the health tracker waits before acting on observed connection errors. */
    private static final int HEALTH_TRACKER_WAIT_MILLIS = 2 * 1000;
    /** Wait before the pool attempts to reconnect; zero reconnects immediately. */
    private static final int POOL_RECONNECT_DELAY_MILLIS = 0;

    @Before
    public void before() throws IOException {
        MockitoAnnotations.initMocks(this);
        tokenMapAndHostSupplier = new UnitTestTokenMapAndHostSupplierImpl(1, REDIS_RACK);
        final ConnectionPoolConfigurationImpl connectionPoolConfiguration =
                new ConnectionPoolConfigurationImpl(REDIS_RACK)
                        .withTokenSupplier(tokenMapAndHostSupplier)
                        .withHashtag("{}")
                        .withPoolReconnectWaitMillis(POOL_RECONNECT_DELAY_MILLIS)
                        .withHealthTrackerDelayMills(HEALTH_TRACKER_WAIT_MILLIS)
                        .setLocalRack(REDIS_RACK)
                        .setLocalDataCenter(REDIS_DATACENTER);

        client = new DynoJedisClient.Builder()
                .withApplicationName("CommandTest")
                .withDynomiteClusterName(REDIS_RACK)
                .withHostSupplier(tokenMapAndHostSupplier)
                .withCPConfig(connectionPoolConfiguration)
                .build();
    }

    @After
    public void after() {
        client.stopClient();
        tokenMapAndHostSupplier.shutdown();
    }

    /**
     * Fails every pooled connection against a paused server, resumes the server, and
     * asserts the pool rebuilds the same number of connections.
     */
    @Test
    public void testConnectionFailureWithDynoException() {
        int startConnectionCount = client.getConnPool().getHostPool(tokenMapAndHostSupplier.getHosts().get(0)).size();

        // Shut down the server before issuing operations so each one fails.
        tokenMapAndHostSupplier.pauseServer(0);
        for (int i = 0; i < startConnectionCount; i++) {
            try {
                client.set("testkey", "testval");
            } catch (DynoException ignored) {
                // Expected while the server is paused; each attempt should fail.
            }
        }

        try {
            tokenMapAndHostSupplier.resumeServer(0);
            // Give the health tracker and reconnect logic time to rebuild connections.
            Thread.sleep(HEALTH_TRACKER_WAIT_MILLIS + POOL_RECONNECT_DELAY_MILLIS + 2 * 1000);
        } catch (InterruptedException ie) {
            // Restore the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        }

        int endConnectionCount = client.getConnPool().getHostPool(tokenMapAndHostSupplier.getHosts().get(0)).size();
        assertEquals("ConnectionPool reconnect failed", startConnectionCount, endConnectionCount);
    }
}
| 6,115 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/utils/SSLContextUtil.java | package com.netflix.dyno.jedis.utils;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import java.io.InputStream;
import java.security.KeyStore;
import java.security.SecureRandom;
/** Builds SSLContexts from JKS stores bundled on the test classpath. */
public class SSLContextUtil {

    /**
     * Hardcoded password for both keystore/truststore for client and server. Because we are using
     * self-signed certificates generated only for this purpose, it is perfectly ok to have a
     * publicly available password here.
     */
    private static final String STOREPASS = "dynotests";

    /**
     * Creates a TLSv1.2 {@link SSLContext} whose key material and trust anchors both come
     * from the given JKS resource on the current thread's context classpath.
     *
     * @param jksFileName classpath resource name of the JKS store
     * @return an initialized {@link SSLContext}
     * @throws Exception if the store cannot be loaded or the context cannot be initialized
     */
    public static final SSLContext createAndInitSSLContext(final String jksFileName) throws Exception {
        final ClassLoader classloader = Thread.currentThread().getContextClassLoader();
        final KeyStore trustStore = KeyStore.getInstance("jks");
        // try-with-resources: the original never closed this stream (resource leak).
        try (final InputStream inputStream = classloader.getResourceAsStream(jksFileName)) {
            trustStore.load(inputStream, STOREPASS.toCharArray());
        }
        final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        keyManagerFactory.init(trustStore, STOREPASS.toCharArray());
        final TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        trustManagerFactory.init(trustStore);
        final SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
        sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), new SecureRandom());
        return sslContext;
    }
}
| 6,116 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/utils/EmbeddedRedisInitializer.java | package com.netflix.dyno.jedis.utils;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.handler.codec.string.StringEncoder;
import io.netty.handler.ssl.SslHandler;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
/** Wires up the channel pipeline for the embedded mock-redis server. */
public class EmbeddedRedisInitializer extends ChannelInitializer<SocketChannel> {

    private final SSLContext sslContext;
    private final boolean useSsl;
    private final String response;

    public EmbeddedRedisInitializer(final SSLContext sslContext, final boolean useSsl, final String response) {
        this.sslContext = sslContext;
        this.useSsl = useSsl;
        this.response = response;
    }

    /**
     * Builds the pipeline: an optional server-mode TLS handler, string codecs, and the
     * canned-response handler.
     */
    @Override
    protected void initChannel(final SocketChannel ch) throws Exception {
        final ChannelPipeline pipeline = ch.pipeline();
        if (useSsl) {
            final SSLEngine sslEngine = sslContext.createSSLEngine();
            sslEngine.setUseClientMode(false);
            // NOTE(review): the original called sslEngine.getNeedClientAuth() and discarded the
            // result — a no-op removed here. If mutual TLS (client auth) was intended, that line
            // should have been sslEngine.setNeedClientAuth(true); confirm before enabling.
            pipeline.addLast("sslHandler", new SslHandler(sslEngine));
        }
        pipeline.addLast(new StringDecoder());
        pipeline.addLast(new StringEncoder());
        pipeline.addLast(new MockedResponseHandler(response));
    }
}
| 6,117 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/utils/MockedRedisResponse.java | package com.netflix.dyno.jedis.utils;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import javax.net.ssl.SSLContext;
import static com.netflix.dyno.jedis.utils.SSLContextUtil.createAndInitSSLContext;
/**
* Simple server that pretends to be redis and will response for all requests with predefined string, surrounding
* it with Bulk Strings (see https://redis.io/topics/protocol). Basically it will response with following data
* <pre>
* $4\r\n
* test\r\n
* </pre>
* <p>
* It allows us to test SSL/TLS end to end scenario, assuming that after connection
* to redis(here, to MockedRedisResponse server) by Jedis client, only simple GET will be invoked.
*/
public class MockedRedisResponse {

    private final String response;
    private final boolean useSsl;

    private ServerBootstrap serverBootstrap;
    private Channel serverChannel;
    private EventLoopGroup bossGroup;
    private EventLoopGroup workerGroup;

    public MockedRedisResponse(final String response, final boolean useSsl) {
        this.response = response;
        this.useSsl = useSsl;
    }

    /**
     * Boots the mock server on port 8998, optionally wrapped in TLS using the
     * "server.jks" store from the test classpath. Blocks until the bind completes.
     */
    public void start() throws Exception {
        final SSLContext sslContext = createAndInitSSLContext("server.jks");

        bossGroup = new NioEventLoopGroup(1);
        workerGroup = new NioEventLoopGroup();

        serverBootstrap = new ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new EmbeddedRedisInitializer(sslContext, useSsl, response));

        serverChannel = serverBootstrap.bind(8998).sync().channel();
    }

    /** Closes the server channel and shuts down both event-loop groups. */
    public void stop() throws InterruptedException {
        serverChannel.close().sync();
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
| 6,118 |
0 | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis | Create_ds/dyno/dyno-jedis/src/test/java/com/netflix/dyno/jedis/utils/MockedResponseHandler.java | package com.netflix.dyno.jedis.utils;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
public class MockedResponseHandler extends SimpleChannelInboundHandler<String> {

    private final String response;

    public MockedResponseHandler(final String response) {
        this.response = response;
    }

    /**
     * Replies to every inbound message with the canned payload framed as a RESP bulk
     * string: {@code $<len>\r\n<payload>\r\n}.
     * Note: the length prefix uses char count, which equals byte count only for the
     * ASCII payloads these tests use.
     */
    @Override
    protected void channelRead0(final ChannelHandlerContext ctx, final String msg) throws Exception {
        final String header = "$" + response.length() + "\r\n";
        final String body = response + "\r\n";
        ctx.writeAndFlush(header);
        ctx.writeAndFlush(body);
    }
}
| 6,119 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoJedisPipeline.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.google.common.base.Strings;
import com.netflix.dyno.connectionpool.*;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.FatalConnectionException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.ZipUtils;
import com.netflix.dyno.jedis.JedisConnectionFactory.JedisConnection;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
import redis.clients.jedis.commands.BinaryRedisPipeline;
import redis.clients.jedis.commands.RedisPipeline;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;

import javax.annotation.concurrent.NotThreadSafe;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import static com.netflix.dyno.connectionpool.ConnectionPoolConfiguration.CompressionStrategy;
@NotThreadSafe
public class DynoJedisPipeline implements RedisPipeline, BinaryRedisPipeline, AutoCloseable {
private static final Logger Logger = LoggerFactory.getLogger(DynoJedisPipeline.class);
// ConnPool and connection to exec the pipeline
private final ConnectionPoolImpl<Jedis> connPool;
private volatile Connection<Jedis> connection;
private final DynoJedisPipelineMonitor opMonitor;
private final ConnectionPoolMonitor cpMonitor;
// the cached pipeline
private volatile Pipeline jedisPipeline = null;
// the cached row key for the pipeline. all subsequent requests to pipeline
// must be the same. this is used to check that.
private final AtomicReference<String> theKey = new AtomicReference<String>(null);
private final AtomicReference<byte[]> theBinaryKey = new AtomicReference<byte[]>(null);
private final AtomicReference<String> hashtag = new AtomicReference<String>(null);
// used for tracking errors
private final AtomicReference<DynoException> pipelineEx = new AtomicReference<DynoException>(null);
private static final String DynoPipeline = "DynoPipeline";
/**
 * Creates a pipeline bound to the given connection pool.
 *
 * @param cPool            pool used to pick the single connection this pipeline executes on
 * @param operationMonitor records per-operation pipeline metrics
 * @param connPoolMonitor  records connection-pool successes and failures
 */
DynoJedisPipeline(ConnectionPoolImpl<Jedis> cPool, DynoJedisPipelineMonitor operationMonitor,
        ConnectionPoolMonitor connPoolMonitor) {
    this.connPool = cPool;
    this.opMonitor = operationMonitor;
    this.cpMonitor = connPoolMonitor;
}
/**
 * Selects a connection from the pool for the given binary key and opens the underlying
 * Jedis pipeline on it.
 * <p>
 * Fix: the original wrapped the pool call in two identical nested try/catch blocks, so a
 * {@link NoAvailableHostsException} incremented the failure counter and discarded the
 * pipeline twice before being rethrown.
 *
 * @param key the binary key this pipeline is bound to
 */
private void pipelined(final byte[] key) {
    try {
        connection = connPool.getConnectionForOperation(new BaseOperation<Jedis, String>() {
            @Override
            public String getName() {
                return DynoPipeline;
            }

            @Override
            public String getStringKey() { // we do not use it in this context
                return null;
            }

            @Override
            public byte[] getBinaryKey() {
                return key;
            }
        });
    } catch (NoAvailableHostsException nahe) {
        cpMonitor.incOperationFailure(connection != null ? connection.getHost() : null, nahe);
        discardPipelineAndReleaseConnection();
        throw nahe;
    }
    Jedis jedis = ((JedisConnection) connection).getClient();
    jedisPipeline = jedis.pipelined();
    cpMonitor.incOperationSuccess(connection.getHost(), 0);
}
/**
 * Selects a connection from the pool for the given string key and opens the underlying
 * Jedis pipeline on it.
 * <p>
 * Fix: the original wrapped the pool call in two identical nested try/catch blocks, so a
 * {@link NoAvailableHostsException} incremented the failure counter and discarded the
 * pipeline twice before being rethrown.
 *
 * @param key the string key this pipeline is bound to
 */
private void pipelined(final String key) {
    try {
        connection = connPool.getConnectionForOperation(new BaseOperation<Jedis, String>() {
            @Override
            public String getName() {
                return DynoPipeline;
            }

            @Override
            public String getStringKey() {
                return key;
            }

            @Override
            public byte[] getBinaryKey() { // we do not use it in this context
                return null;
            }
        });
    } catch (NoAvailableHostsException nahe) {
        cpMonitor.incOperationFailure(connection != null ? connection.getHost() : null, nahe);
        discardPipelineAndReleaseConnection();
        throw nahe;
    }
    Jedis jedis = ((JedisConnection) connection).getClient();
    jedisPipeline = jedis.pipelined();
    cpMonitor.incOperationSuccess(connection.getHost(), 0);
}
/**
 * Binds this pipeline to a single hashtag value. The first caller wins the CAS and opens
 * the pipeline; every later caller must present the same hashtag value.
 */
private void checkHashtag(final String key, final String hashtagValue) {
    if (this.hashtag.get() != null) {
        verifyHashtagValue(hashtagValue);
        return;
    }
    if (this.hashtag.compareAndSet(null, hashtagValue)) {
        pipelined(key);
    } else {
        // Another thread bound the hashtag first; just verify ours matches.
        verifyHashtagValue(hashtagValue);
    }
}
/**
 * Checks that this pipeline is associated with a single binary key, binding it on first
 * use. Binary keys do not support hashtags.
 * <p>
 * NOTE(review): the CAS stores the caller's array reference; subsequent calls are routed
 * through {@code verifyKey(byte[])} for comparison.
 *
 * @param key the binary key this pipeline operation targets
 */
private void checkKey(final byte[] key) {
    if (theBinaryKey.get() != null) {
        verifyKey(key);
    } else {
        boolean success = theBinaryKey.compareAndSet(null, key);
        if (!success) {
            // someone already beat us to it. that's fine, just verify
            // that the key is the same
            verifyKey(key);
        } else {
            // first key wins: open the pipeline on the connection that owns this key
            pipelined(key);
        }
    }
}
/**
 * Checks that this pipeline is associated with a single key, binding it on first use.
 * If the connection pool configuration defines a hashtag (e.g. "{}"), keys are grouped
 * by the value between the hashtag's open/close characters instead of by the full key.
 *
 * @param key the key this pipeline operation targets
 */
private void checkKey(final String key) {
    /*
     * Get hashtag from the first host of the active pool. We cannot use the
     * connection object because as of now we have not selected a connection. A
     * connection is selected based on the key or hashtag respectively.
     */
    String hashtag = connPool.getConfiguration().getHashtag();
    if (hashtag == null || hashtag.isEmpty()) {
        if (theKey.get() != null) {
            verifyKey(key);
        } else {
            boolean success = theKey.compareAndSet(null, key);
            if (!success) {
                // someone already beat us to it. that's fine, just verify
                // that the key is the same
                verifyKey(key);
            } else {
                pipelined(key);
            }
        }
    } else {
        /*
         * We have identified a hashtag in the configuration. That means Dynomite has a
         * defined hashtag. Producing the hashvalue out of the hashtag and using that as
         * a reference to the pipeline.
         */
        // assumes the configured hashtag is exactly a two-character open/close pair,
        // e.g. "{}" -- charAt(0)/charAt(1) would misbehave for other formats; TODO confirm
        String hashValue = StringUtils.substringBetween(key, Character.toString(hashtag.charAt(0)),
                Character.toString(hashtag.charAt(1)));
        if (Strings.isNullOrEmpty(hashValue)) {
            // no hashtag present in this key: fall back to the whole key
            hashValue = key;
        }
        checkHashtag(key, hashValue);
    }
}
/**
 * Verifies that the given binary key matches the key this pipeline is bound to.
 * <p>
 * Fix: uses {@link Arrays#equals(byte[], byte[])} — {@code byte[].equals} is reference
 * identity, so the original rejected equal-content keys held in distinct arrays. The
 * error message now prints the bytes instead of the array's identity hash.
 */
private void verifyKey(final byte[] key) {
    if (!Arrays.equals(theBinaryKey.get(), key)) {
        try {
            throw new RuntimeException("Must have same key for Redis Pipeline in Dynomite. This key: "
                    + Arrays.toString(key));
        } finally {
            discardPipelineAndReleaseConnection();
        }
    }
}
/** Throws (and tears the pipeline down) unless {@code key} matches the bound pipeline key. */
private void verifyKey(final String key) {
    final String boundKey = theKey.get();
    if (boundKey.equals(key)) {
        return;
    }
    try {
        throw new RuntimeException("Must have same key for Redis Pipeline in Dynomite. This key: " + key);
    } finally {
        discardPipelineAndReleaseConnection();
    }
}
/** Throws (and tears the pipeline down) unless {@code hashtagValue} matches the bound hashtag. */
private void verifyHashtagValue(final String hashtagValue) {
    final String boundHashtag = this.hashtag.get();
    if (boundHashtag.equals(hashtagValue)) {
        return;
    }
    try {
        throw new RuntimeException(
                "Must have same hashtag for Redis Pipeline in Dynomite. This hashvalue: " + hashtagValue);
    } finally {
        discardPipelineAndReleaseConnection();
    }
}
/**
 * Best-effort decompression of a pipeline result string.
 * <p>
 * If {@code value} looks compressed (per {@link ZipUtils#isCompressed}), returns the
 * Base64-decoded, decompressed string; otherwise — or if decompression fails — returns
 * the value unchanged, logging a warning on failure.
 */
private String decompressValue(String value) {
    try {
        if (ZipUtils.isCompressed(value)) {
            return ZipUtils.decompressFromBase64String(value);
        }
    } catch (IOException e) {
        // best-effort: fall through and hand back the raw value
        Logger.warn("Unable to decompress value [" + value + "]");
    }
    return value;
}
/**
 * Best-effort decompression of a binary pipeline result. Returns the raw bytes unchanged
 * when the payload is not compressed or when decompression fails (a warning is logged).
 */
private byte[] decompressValue(byte[] value) {
    try {
        return ZipUtils.isCompressed(value) ? ZipUtils.decompressBytesNonBase64(value) : value;
    } catch (IOException e) {
        Logger.warn("Unable to decompress byte array value [" + value + "]");
        return value;
    }
}
/**
* As long as jdk 7 and below is supported we need to define our own function
* interfaces
*/
private interface Func0<R> {
R call();
}
public class PipelineResponse extends Response<String> {
private Response<String> response;
public PipelineResponse(Builder<String> b) {
super(BuilderFactory.STRING);
}
public PipelineResponse apply(Func0<? extends Response<String>> f) {
this.response = f.call();
return this;
}
@Override
public String get() {
return decompressValue(response.get());
}
}
public class PipelineLongResponse extends Response<Long> {
private Response<Long> response;
public PipelineLongResponse(Builder<Long> b) {
super(b);
}
public PipelineLongResponse apply(Func0<? extends Response<Long>> f) {
this.response = f.call();
return this;
}
}
public class PipelineListResponse extends Response<List<String>> {
private Response<List<String>> response;
public PipelineListResponse(Builder<List> b) {
super(BuilderFactory.STRING_LIST);
}
public PipelineListResponse apply(Func0<? extends Response<List<String>>> f) {
this.response = f.call();
return this;
}
@Override
public List<String> get() {
return new ArrayList<String>(
CollectionUtils.transform(response.get(), new CollectionUtils.Transform<String, String>() {
@Override
public String get(String s) {
return decompressValue(s);
}
}));
}
}
public class PipelineBinaryResponse extends Response<byte[]> {
private Response<byte[]> response;
public PipelineBinaryResponse(Builder<String> b) {
super(BuilderFactory.BYTE_ARRAY);
}
public PipelineBinaryResponse apply(Func0<? extends Response<byte[]>> f) {
this.response = f.call();
return this;
}
@Override
public byte[] get() {
return decompressValue(response.get());
}
}
public class PipelineMapResponse extends Response<Map<String, String>> {
private Response<Map<String, String>> response;
public PipelineMapResponse(Builder<Map<String, String>> b) {
super(BuilderFactory.STRING_MAP);
}
@Override
public Map<String, String> get() {
return CollectionUtils.transform(response.get(),
new CollectionUtils.MapEntryTransform<String, String, String>() {
@Override
public String get(String key, String val) {
return decompressValue(val);
}
});
}
}
public class PipelineBinaryMapResponse extends Response<Map<byte[], byte[]>> {
private Response<Map<byte[], byte[]>> response;
public PipelineBinaryMapResponse(Builder<Map<byte[], byte[]>> b) {
super(BuilderFactory.BYTE_ARRAY_MAP);
}
public PipelineBinaryMapResponse apply(Func0<? extends Response<Map<byte[], byte[]>>> f) {
this.response = f.call();
return this;
}
@Override
public Map<byte[], byte[]> get() {
return CollectionUtils.transform(response.get(),
new CollectionUtils.MapEntryTransform<byte[], byte[], byte[]>() {
@Override
public byte[] get(byte[] key, byte[] val) {
return decompressValue(val);
}
});
}
}
private abstract class PipelineOperation<R> {
abstract Response<R> execute(Pipeline jedisPipeline) throws DynoException;
Response<R> execute(final byte[] key, final OpName opName) {
checkKey(key);
return executeOperation(opName);
}
Response<R> execute(final String key, final OpName opName) {
checkKey(key);
return executeOperation(opName);
}
Response<R> executeOperation(final OpName opName) {
try {
opMonitor.recordOperation(opName.name());
return execute(jedisPipeline);
} catch (JedisConnectionException ex) {
handleConnectionException(ex);
throw ex;
}
}
void handleConnectionException(JedisConnectionException ex) {
DynoException e = new FatalConnectionException(ex).setAttempt(1);
pipelineEx.set(e);
cpMonitor.incOperationFailure(connection.getHost(), e);
}
}
private abstract class PipelineCompressionOperation<R> extends PipelineOperation<R> {
/**
* Compresses the value based on the threshold defined by
* {@link ConnectionPoolConfiguration#getValueCompressionThreshold()}
*
* @param value
* @return
*/
public String compressValue(String value) {
String result = value;
int thresholdBytes = connPool.getConfiguration().getValueCompressionThreshold();
try {
// prefer speed over accuracy here so rather than using
// getBytes() to get the actual size
// just estimate using 2 bytes per character
if ((2 * value.length()) > thresholdBytes) {
result = ZipUtils.compressStringToBase64String(value);
}
} catch (IOException e) {
Logger.warn("UNABLE to compress [" + value + "]; sending value uncompressed");
}
return result;
}
public byte[] compressValue(byte[] value) {
int thresholdBytes = connPool.getConfiguration().getValueCompressionThreshold();
if (value.length > thresholdBytes) {
try {
return ZipUtils.compressBytesNonBase64(value);
} catch (IOException e) {
Logger.warn("UNABLE to compress byte array [" + value + "]; sending value uncompressed");
}
}
return value;
}
}
@Override
public Response<Long> append(final String key, final String value) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.append(key, value);
}
}.execute(key, OpName.APPEND);
}
@Override
public Response<List<String>> blpop(final String arg) {
return new PipelineOperation<List<String>>() {
@Override
Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.blpop(arg);
}
}.execute(arg, OpName.BLPOP);
}
@Override
public Response<List<String>> brpop(final String arg) {
return new PipelineOperation<List<String>>() {
@Override
Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.brpop(arg);
}
}.execute(arg, OpName.BRPOP);
}
@Override
public Response<Long> decr(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.decr(key);
}
}.execute(key, OpName.DECR);
}
@Override
public Response<Long> decrBy(final String key, final long integer) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.decrBy(key, integer);
}
}.execute(key, OpName.DECRBY);
}
@Override
public Response<Long> del(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.del(key);
}
}.execute(key, OpName.DEL);
}
@Override
public Response<Long> unlink(String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.unlink(key);
}
}.execute(key, OpName.UNLINK);
}
@Override
public Response<String> echo(final String string) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.echo(string);
}
}.execute(string, OpName.ECHO);
}
@Override
public Response<Boolean> exists(final String key) {
return new PipelineOperation<Boolean>() {
@Override
Response<Boolean> execute(final Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.exists(key);
}
}.execute(key, OpName.EXISTS);
}
@Override
public Response<Long> expire(final String key, final int seconds) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(final Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.expire(key, seconds);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.EXPIRE.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.EXPIRE);
}
@Override
public Response<Long> pexpire(String key, long milliseconds) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> expireAt(final String key, final long unixTime) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(final Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.expireAt(key, unixTime);
}
}.execute(key, OpName.EXPIREAT);
}
@Override
public Response<Long> pexpireAt(String key, long millisecondsTimestamp) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> get(final String key) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.get(key);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.GET.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.GET);
} else {
return new PipelineCompressionOperation<String>() {
@Override
Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
return new PipelineResponse(null).apply(new Func0<Response<String>>() {
@Override
public Response<String> call() {
return jedisPipeline.get(key);
}
});
}
}.execute(key, OpName.GET);
}
}
@Override
public Response<Boolean> getbit(final String key, final long offset) {
return new PipelineOperation<Boolean>() {
@Override
Response<Boolean> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.getbit(key, offset);
}
}.execute(key, OpName.GETBIT);
}
@Override
public Response<String> getrange(final String key, final long startOffset, final long endOffset) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.getrange(key, startOffset, endOffset);
}
}.execute(key, OpName.GETRANGE);
}
@Override
public Response<String> getSet(final String key, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.getSet(key, value);
}
}.execute(key, OpName.GETSET);
} else {
return new PipelineCompressionOperation<String>() {
@Override
Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
return new PipelineResponse(null).apply(new Func0<Response<String>>() {
@Override
public Response<String> call() {
return jedisPipeline.getSet(key, compressValue(value));
}
});
}
}.execute(key, OpName.GETSET);
}
}
@Override
public Response<Long> hdel(final String key, final String... field) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hdel(key, field);
}
}.execute(key, OpName.HDEL);
}
@Override
public Response<Boolean> hexists(final String key, final String field) {
return new PipelineOperation<Boolean>() {
@Override
Response<Boolean> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hexists(key, field);
}
}.execute(key, OpName.HEXISTS);
}
@Override
public Response<String> hget(final String key, final String field) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hget(key, field);
}
}.execute(key, OpName.HGET);
} else {
return new PipelineCompressionOperation<String>() {
@Override
Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
return new PipelineResponse(null).apply(new Func0<Response<String>>() {
@Override
public Response<String> call() {
return jedisPipeline.hget(key, field);
}
});
}
}.execute(key, OpName.HGET);
}
}
/**
* This method is a BinaryRedisPipeline command which dyno does not yet properly
* support, therefore the interface is not yet implemented.
*/
public Response<byte[]> hget(final byte[] key, final byte[] field) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<byte[]>() {
@Override
Response<byte[]> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hget(key, field);
}
}.execute(key, OpName.HGET);
} else {
return new PipelineCompressionOperation<byte[]>() {
@Override
Response<byte[]> execute(final Pipeline jedisPipeline) throws DynoException {
return new PipelineBinaryResponse(null).apply(new Func0<Response<byte[]>>() {
@Override
public Response<byte[]> call() {
return jedisPipeline.hget(key, field);
}
});
}
}.execute(key, OpName.HGET);
}
}
@Override
public Response<Map<String, String>> hgetAll(final String key) {
return new PipelineOperation<Map<String, String>>() {
@Override
Response<Map<String, String>> execute(Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.hgetAll(key);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.HGETALL.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.HGETALL);
}
public Response<Map<byte[], byte[]>> hgetAll(final byte[] key) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<Map<byte[], byte[]>>() {
@Override
Response<Map<byte[], byte[]>> execute(Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.hgetAll(key);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.HGETALL.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.HGETALL);
} else {
return new PipelineCompressionOperation<Map<byte[], byte[]>>() {
@Override
Response<Map<byte[], byte[]>> execute(final Pipeline jedisPipeline) throws DynoException {
return new PipelineBinaryMapResponse(null).apply(new Func0<Response<Map<byte[], byte[]>>>() {
@Override
public Response<Map<byte[], byte[]>> call() {
return jedisPipeline.hgetAll(key);
}
});
}
}.execute(key, OpName.HGETALL);
}
}
@Override
public Response<Long> hincrBy(final String key, final String field, final long value) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hincrBy(key, field, value);
}
}.execute(key, OpName.HINCRBY);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<Double> hincrByFloat(final String key, final String field, final double value) {
return new PipelineOperation<Double>() {
@Override
Response<Double> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hincrByFloat(key, field, value);
}
}.execute(key, OpName.HINCRBYFLOAT);
}
@Override
public Response<Set<String>> hkeys(final String key) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hkeys(key);
}
}.execute(key, OpName.HKEYS);
}
public Response<ScanResult<Map.Entry<String, String>>> hscan(final String key, int cursor) {
throw new UnsupportedOperationException("'HSCAN' cannot be called in pipeline");
}
@Override
public Response<Long> hlen(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hlen(key);
}
}.execute(key, OpName.HLEN);
}
/**
 * HMGET over a binary key. This is a BinaryRedisPipeline command which dyno
 * does not yet properly support, therefore the interface is not yet
 * implemented (no @Override, no compression-aware branch — values are
 * returned exactly as stored).
 */
public Response<List<byte[]>> hmget(final byte[] key, final byte[]... fields) {
    return new PipelineOperation<List<byte[]>>() {
        @Override
        Response<List<byte[]>> execute(Pipeline jedisPipeline) throws DynoException {
            long startTime = System.nanoTime() / 1000; // microseconds
            try {
                return jedisPipeline.hmget(key, fields);
            } finally {
                // Record client-side send latency regardless of outcome.
                long duration = System.nanoTime() / 1000 - startTime;
                opMonitor.recordSendLatency(OpName.HMGET.name(), duration, TimeUnit.MICROSECONDS);
            }
        }
    }.execute(key, OpName.HMGET);
}
@Override
public Response<List<String>> hmget(final String key, final String... fields) {
    // HMGET: fetch multiple hash fields in one round trip. Send latency is
    // recorded in microseconds on both paths; when compression is enabled the
    // returned values are routed through PipelineListResponse so each value
    // can be decompressed transparently.
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<List<String>>() {
            @Override
            Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
                long startTime = System.nanoTime() / 1000; // microseconds
                try {
                    return jedisPipeline.hmget(key, fields);
                } finally {
                    long duration = System.nanoTime() / 1000 - startTime;
                    opMonitor.recordSendLatency(OpName.HMGET.name(), duration, TimeUnit.MICROSECONDS);
                }
            }
        }.execute(key, OpName.HMGET);
    } else {
        return new PipelineCompressionOperation<List<String>>() {
            @Override
            Response<List<String>> execute(final Pipeline jedisPipeline) throws DynoException {
                long startTime = System.nanoTime() / 1000; // microseconds
                try {
                    return new PipelineListResponse(null).apply(new Func0<Response<List<String>>>() {
                        @Override
                        public Response<List<String>> call() {
                            return jedisPipeline.hmget(key, fields);
                        }
                    });
                } finally {
                    long duration = System.nanoTime() / 1000 - startTime;
                    opMonitor.recordSendLatency(OpName.HMGET.name(), duration, TimeUnit.MICROSECONDS);
                }
            }
        // Fixed: previously executed with OpName.HGET, which misattributed the
        // compressed-path HMGET in per-op accounting/metrics.
        }.execute(key, OpName.HMGET);
    }
}
/**
 * HMSET over a binary key. This is a BinaryRedisPipeline command which dyno
 * does not yet properly support, therefore the interface is not yet
 * implemented since only a few binary commands are present.
 */
public Response<String> hmset(final byte[] key, final Map<byte[], byte[]> hash) {
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<String>() {
            @Override
            Response<String> execute(Pipeline jedisPipeline) throws DynoException {
                long startTime = System.nanoTime() / 1000; // microseconds
                try {
                    return jedisPipeline.hmset(key, hash);
                } finally {
                    long duration = System.nanoTime() / 1000 - startTime;
                    opMonitor.recordSendLatency(OpName.HMSET.name(), duration, TimeUnit.MICROSECONDS);
                }
            }
        }.execute(key, OpName.HMSET);
    } else {
        // Compression path: every map value is compressed before the write;
        // hash keys are left untouched.
        // NOTE(review): unlike the plain path above, no send latency is
        // recorded here — confirm whether that asymmetry is intentional.
        return new PipelineCompressionOperation<String>() {
            @Override
            Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
                return new PipelineResponse(null).apply(new Func0<Response<String>>() {
                    @Override
                    public Response<String> call() {
                        return jedisPipeline.hmset(key, CollectionUtils.transform(hash,
                                new CollectionUtils.MapEntryTransform<byte[], byte[], byte[]>() {
                                    @Override
                                    // 'key' here is the hash field (shadows the outer Redis key).
                                    public byte[] get(byte[] key, byte[] val) {
                                        return compressValue(val);
                                    }
                                }));
                    }
                });
            }
        }.execute(key, OpName.HMSET);
    }
}
@Override
public Response<String> hmset(final String key, final Map<String, String> hash) {
    // HMSET: set multiple hash fields at once. With compression enabled,
    // each value is compressed via compressValue before the write; field
    // names are never compressed.
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<String>() {
            @Override
            Response<String> execute(Pipeline jedisPipeline) throws DynoException {
                long startTime = System.nanoTime() / 1000; // microseconds
                try {
                    return jedisPipeline.hmset(key, hash);
                } finally {
                    long duration = System.nanoTime() / 1000 - startTime;
                    opMonitor.recordSendLatency(OpName.HMSET.name(), duration, TimeUnit.MICROSECONDS);
                }
            }
        }.execute(key, OpName.HMSET);
    } else {
        // NOTE(review): this branch records no send latency, unlike the
        // uncompressed branch above — confirm whether that is intentional.
        return new PipelineCompressionOperation<String>() {
            @Override
            Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
                return new PipelineResponse(null).apply(new Func0<Response<String>>() {
                    @Override
                    public Response<String> call() {
                        return jedisPipeline.hmset(key, CollectionUtils.transform(hash,
                                new CollectionUtils.MapEntryTransform<String, String, String>() {
                                    @Override
                                    // 'key' here is the hash field (shadows the outer Redis key).
                                    public String get(String key, String val) {
                                        return compressValue(val);
                                    }
                                }));
                    }
                });
            }
        }.execute(key, OpName.HMSET);
    }
}
@Override
public Response<Long> hset(final String key, final String field, final String value) {
    // HSET: set one hash field. When compression is enabled, the value is
    // compressed before the write and the call is wrapped in
    // PipelineLongResponse for consistent response handling.
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<Long>() {
            @Override
            Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
                return jedisPipeline.hset(key, field, value);
            }
        }.execute(key, OpName.HSET);
    } else {
        return new PipelineCompressionOperation<Long>() {
            @Override
            Response<Long> execute(final Pipeline jedisPipeline) throws DynoException {
                return new PipelineLongResponse(null).apply(new Func0<Response<Long>>() {
                    @Override
                    public Response<Long> call() {
                        return jedisPipeline.hset(key, field, compressValue(value));
                    }
                });
            }
        }.execute(key, OpName.HSET);
    }
}
@Override
public Response<Long> hset(String key, Map<String, String> hash) {
throw new UnsupportedOperationException("not yet implemented");
}
/**
* This method is a BinaryRedisPipeline command which dyno does not yet properly
* support, therefore the interface is not yet implemented.
*/
public Response<Long> hset(final byte[] key, final byte[] field, final byte[] value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.hset(key, field, value);
}
}.execute(key, OpName.HSET);
} else {
return new PipelineCompressionOperation<Long>() {
@Override
Response<Long> execute(final Pipeline jedisPipeline) throws DynoException {
return new PipelineLongResponse(null).apply(new Func0<Response<Long>>() {
@Override
public Response<Long> call() {
return jedisPipeline.hset(key, field, compressValue(value));
}
});
}
}.execute(key, OpName.HSET);
}
}
@Override
public Response<Long> hset(byte[] key, Map<byte[], byte[]> hash) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hsetnx(final String key, final String field, final String value) {
    // HSETNX: set a hash field only if it does not already exist. The value
    // is compressed first when a compression strategy is configured.
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<Long>() {
            @Override
            Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
                return jedisPipeline.hsetnx(key, field, value);
            }
        }.execute(key, OpName.HSETNX);
    } else {
        return new PipelineCompressionOperation<Long>() {
            @Override
            Response<Long> execute(final Pipeline jedisPipeline) throws DynoException {
                return new PipelineLongResponse(null).apply(new Func0<Response<Long>>() {
                    @Override
                    public Response<Long> call() {
                        return jedisPipeline.hsetnx(key, field, compressValue(value));
                    }
                });
            }
        }.execute(key, OpName.HSETNX);
    }
}
@Override
public Response<List<String>> hvals(final String key) {
    // HVALS: all values of the hash. On the compression path the raw call is
    // identical, but the result is wrapped in PipelineListResponse so each
    // stored value can be decompressed on read.
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<List<String>>() {
            @Override
            Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
                return jedisPipeline.hvals(key);
            }
        }.execute(key, OpName.HVALS);
    } else {
        return new PipelineCompressionOperation<List<String>>() {
            @Override
            Response<List<String>> execute(final Pipeline jedisPipeline) throws DynoException {
                return new PipelineListResponse(null).apply(new Func0<Response<List<String>>>() {
                    @Override
                    public Response<List<String>> call() {
                        return jedisPipeline.hvals(key);
                    }
                });
            }
        }.execute(key, OpName.HVALS);
    }
}
@Override
public Response<Long> incr(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.incr(key);
}
}.execute(key, OpName.INCR);
}
@Override
public Response<Long> incrBy(final String key, final long integer) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.incrBy(key, integer);
}
}.execute(key, OpName.INCRBY);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<Double> incrByFloat(final String key, final double increment) {
return new PipelineOperation<Double>() {
@Override
Response<Double> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.incrByFloat(key, increment);
}
}.execute(key, OpName.INCRBYFLOAT);
}
@Override
public Response<String> psetex(final String key, final long milliseconds, final String value) {
    // PSETEX: SETEX with a millisecond TTL.
    // Fixed: this previously returned null, which would NPE (or silently
    // drop the write) at the caller; delegate to the jedis pipeline like the
    // sibling operations instead.
    return new PipelineOperation<String>() {
        @Override
        Response<String> execute(Pipeline jedisPipeline) throws DynoException {
            return jedisPipeline.psetex(key, milliseconds, value);
        }
    }.execute(key, OpName.PSETEX);
}
@Override
public Response<String> lindex(final String key, final long index) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lindex(key, index);
}
}.execute(key, OpName.LINDEX);
}
@Override
public Response<Long> linsert(final String key, final ListPosition where, final String pivot, final String value) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.linsert(key, where, pivot, value);
}
}.execute(key, OpName.LINSERT);
}
@Override
public Response<Long> llen(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.llen(key);
}
}.execute(key, OpName.LLEN);
}
@Override
public Response<String> lpop(final String key) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lpop(key);
}
}.execute(key, OpName.LPOP);
}
@Override
public Response<Long> lpush(final String key, final String... string) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lpush(key, string);
}
}.execute(key, OpName.LPUSH);
}
@Override
public Response<Long> lpushx(final String key, final String... string) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lpushx(key, string);
}
}.execute(key, OpName.LPUSHX);
}
@Override
public Response<List<String>> lrange(final String key, final long start, final long end) {
return new PipelineOperation<List<String>>() {
@Override
Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lrange(key, start, end);
}
}.execute(key, OpName.LRANGE);
}
@Override
public Response<Long> lrem(final String key, final long count, final String value) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lrem(key, count, value);
}
}.execute(key, OpName.LREM);
}
@Override
public Response<String> lset(final String key, final long index, final String value) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.lset(key, index, value);
}
}.execute(key, OpName.LSET);
}
@Override
public Response<String> ltrim(final String key, final long start, final long end) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.ltrim(key, start, end);
}
}.execute(key, OpName.LTRIM);
}
@Override
public Response<Long> move(final String key, final int dbIndex) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.move(key, dbIndex);
}
}.execute(key, OpName.MOVE);
}
@Override
public Response<Long> persist(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.persist(key);
}
}.execute(key, OpName.PERSIST);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<String> rename(final String oldkey, final String newkey) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.rename(oldkey, newkey);
}
}.execute(oldkey, OpName.RENAME);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<Long> renamenx(final String oldkey, final String newkey) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.renamenx(oldkey, newkey);
}
}.execute(oldkey, OpName.RENAMENX);
}
@Override
public Response<String> rpop(final String key) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.rpop(key);
}
}.execute(key, OpName.RPOP);
}
@Override
public Response<Long> rpush(final String key, final String... string) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.rpush(key, string);
}
}.execute(key, OpName.RPUSH);
}
@Override
public Response<Long> rpushx(final String key, final String... string) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.rpushx(key, string);
}
}.execute(key, OpName.RPUSHX);
}
@Override
public Response<Long> sadd(final String key, final String... member) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.sadd(key, member);
}
}.execute(key, OpName.SADD);
}
@Override
public Response<Long> scard(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.scard(key);
}
}.execute(key, OpName.SCARD);
}
@Override
public Response<Boolean> sismember(final String key, final String member) {
return new PipelineOperation<Boolean>() {
@Override
Response<Boolean> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.sismember(key, member);
}
}.execute(key, OpName.SISMEMBER);
}
@Override
public Response<String> set(final String key, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.set(key, value);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.SET.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.SET);
} else {
return new PipelineCompressionOperation<String>() {
@Override
Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return new PipelineResponse(null).apply(new Func0<Response<String>>() {
@Override
public Response<String> call() {
return jedisPipeline.set(key, compressValue(value));
}
});
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.SET.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.SET);
}
}
@Override
public Response<Boolean> setbit(final String key, final long offset, final boolean value) {
return new PipelineOperation<Boolean>() {
@Override
Response<Boolean> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.setbit(key, offset, value);
}
}.execute(key, OpName.SETBIT);
}
@Override
public Response<String> setex(final String key, final int seconds, final String value) {
    // SETEX: SET with a TTL in seconds. With compression enabled the value
    // is compressed before the write; the TTL itself is unaffected.
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<String>() {
            @Override
            Response<String> execute(Pipeline jedisPipeline) throws DynoException {
                return jedisPipeline.setex(key, seconds, value);
            }
        }.execute(key, OpName.SETEX);
    } else {
        return new PipelineCompressionOperation<String>() {
            @Override
            Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
                return new PipelineResponse(null).apply(new Func0<Response<String>>() {
                    @Override
                    public Response<String> call() {
                        return jedisPipeline.setex(key, seconds, compressValue(value));
                    }
                });
            }
        }.execute(key, OpName.SETEX);
    }
}
@Override
public Response<Long> setnx(final String key, final String value) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.setnx(key, value);
}
}.execute(key, OpName.SETNX);
}
@Override
public Response<Long> setrange(final String key, final long offset, final String value) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.setrange(key, offset, value);
}
}.execute(key, OpName.SETRANGE);
}
@Override
public Response<Set<String>> smembers(final String key) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.smembers(key);
}
}.execute(key, OpName.SMEMBERS);
}
@Override
public Response<List<String>> sort(final String key) {
return new PipelineOperation<List<String>>() {
@Override
Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.sort(key);
}
}.execute(key, OpName.SORT);
}
@Override
public Response<List<String>> sort(final String key, final SortingParams sortingParameters) {
return new PipelineOperation<List<String>>() {
@Override
Response<List<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.sort(key, sortingParameters);
}
}.execute(key, OpName.SORT);
}
@Override
public Response<String> spop(final String key) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.spop(key);
}
}.execute(key, OpName.SPOP);
}
@Override
public Response<Set<String>> spop(final String key, final long count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> srandmember(final String key) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.srandmember(key);
}
}.execute(key, OpName.SRANDMEMBER);
}
@Override
public Response<Long> srem(final String key, final String... member) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.srem(key, member);
}
}.execute(key, OpName.SREM);
}
/**
* This method is not supported by the BinaryRedisPipeline interface.
*/
public Response<ScanResult<String>> sscan(final String key, final int cursor) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline");
}
/**
* This method is not supported by the BinaryRedisPipeline interface.
*/
public Response<ScanResult<String>> sscan(final String key, final String cursor) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline");
}
@Override
public Response<Long> strlen(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.strlen(key);
}
}.execute(key, OpName.STRLEN);
}
@Override
public Response<String> substr(final String key, final int start, final int end) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.substr(key, start, end);
}
}.execute(key, OpName.SUBSTR);
}
@Override
public Response<Long> touch(String key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> ttl(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.ttl(key);
}
}.execute(key, OpName.TTL);
}
@Override
public Response<Long> pttl(String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.pttl(key);
}
}.execute(key, OpName.PTTL);
}
@Override
public Response<String> type(final String key) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.type(key);
}
}.execute(key, OpName.TYPE);
}
@Override
public Response<Long> zadd(final String key, final double score, final String member) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zadd(key, score, member);
}
}.execute(key, OpName.ZADD);
}
@Override
public Response<Long> zadd(final String key, final Map<String, Double> scoreMembers) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zadd(key, scoreMembers);
}
}.execute(key, OpName.ZADD);
}
@Override
public Response<Long> zcard(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zcard(key);
}
}.execute(key, OpName.ZCARD);
}
@Override
public Response<Long> zcount(final String key, final double min, final double max) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zcount(key, min, max);
}
}.execute(key, OpName.ZCOUNT);
}
@Override
public Response<Long> zcount(String key, String min, String max) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zcount(key, min, max);
}
}.execute(key, OpName.ZCOUNT);
}
@Override
public Response<Double> zincrby(final String key, final double score, final String member) {
return new PipelineOperation<Double>() {
@Override
Response<Double> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zincrby(key, score, member);
}
}.execute(key, OpName.ZINCRBY);
}
@Override
public Response<Set<String>> zrange(final String key, final long start, final long end) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrange(key, start, end);
}
}.execute(key, OpName.ZRANGE);
}
@Override
public Response<Set<String>> zrangeByScore(final String key, final double min, final double max) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeByScore(key, min, max);
}
}.execute(key, OpName.ZRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrangeByScore(final String key, final String min, final String max) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeByScore(key, min, max);
}
}.execute(key, OpName.ZRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrangeByScore(final String key, final double min, final double max, final int offset,
final int count) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeByScore(key, min, max, offset, count);
}
}.execute(key, OpName.ZRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrangeByScore(String key, String min, String max, int offset, int count) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeByScore(key, min, max, offset, count);
}
}.execute(key, OpName.ZRANGEBYSCORE);
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(final String key, final double min, final double max) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeByScoreWithScores(key, min, max);
}
}.execute(key, OpName.ZRANGEBYSCOREWITHSCORES);
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(final String key, final double min, final double max,
final int offset, final int count) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeByScoreWithScores(key, min, max, offset, count);
}
}.execute(key, OpName.ZRANGEBYSCOREWITHSCORES);
}
@Override
public Response<Set<String>> zrevrangeByScore(final String key, final double max, final double min) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScore(key, max, min);
}
}.execute(key, OpName.ZREVRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrevrangeByScore(final String key, final String max, final String min) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScore(key, max, min);
}
}.execute(key, OpName.ZREVRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrevrangeByScore(final String key, final double max, final double min,
final int offset, final int count) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScore(key, max, min, offset, count);
}
}.execute(key, OpName.ZREVRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrevrangeByScore(String key, String max, String min, int offset, int count) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScore(key, max, min, offset, count);
}
}.execute(key, OpName.ZREVRANGEBYSCORE);
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(final String key, final double max, final double min) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScoreWithScores(key, max, min);
}
}.execute(key, OpName.ZREVRANGEBYSCOREWITHSCORES);
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(String key, String max, String min) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScoreWithScores(key, max, min);
}
}.execute(key, OpName.ZREVRANGEBYSCOREWITHSCORES);
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(final String key, final double max, final double min,
final int offset, final int count) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
}.execute(key, OpName.ZREVRANGEBYSCOREWITHSCORES);
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(String key, String max, String min, int offset, int count) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
}.execute(key, OpName.ZREVRANGEBYSCOREWITHSCORES);
}
@Override
public Response<Set<Tuple>> zrangeWithScores(final String key, final long start, final long end) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrangeWithScores(key, start, end);
}
}.execute(key, OpName.ZRANGEWITHSCORES);
}
@Override
public Response<Long> zrank(final String key, final String member) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrank(key, member);
}
}.execute(key, OpName.ZRANK);
}
@Override
public Response<Long> zrem(final String key, final String... member) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrem(key, member);
}
}.execute(key, OpName.ZREM);
}
@Override
public Response<Long> zremrangeByRank(final String key, final long start, final long end) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zremrangeByRank(key, start, end);
}
}.execute(key, OpName.ZREMRANGEBYRANK);
}
@Override
public Response<Long> zremrangeByScore(final String key, final double start, final double end) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zremrangeByScore(key, start, end);
}
}.execute(key, OpName.ZREMRANGEBYSCORE);
}
@Override
public Response<Long> zremrangeByScore(String key, String min, String max) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zremrangeByScore(key, min, max);
}
}.execute(key, OpName.ZREMRANGEBYSCORE);
}
@Override
public Response<Set<String>> zrevrange(final String key, final long start, final long end) {
return new PipelineOperation<Set<String>>() {
@Override
Response<Set<String>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrange(key, start, end);
}
}.execute(key, OpName.ZREVRANGE);
}
@Override
public Response<Set<Tuple>> zrevrangeWithScores(final String key, final long start, final long end) {
return new PipelineOperation<Set<Tuple>>() {
@Override
Response<Set<Tuple>> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrangeWithScores(key, start, end);
}
}.execute(key, OpName.ZREVRANGEWITHSCORES);
}
@Override
public Response<Long> zrevrank(final String key, final String member) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zrevrank(key, member);
}
}.execute(key, OpName.ZREVRANK);
}
@Override
public Response<Double> zscore(final String key, final String member) {
return new PipelineOperation<Double>() {
@Override
Response<Double> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.zscore(key, member);
}
}.execute(key, OpName.ZSCORE);
}
/**
* This method is not supported by the BinaryRedisPipeline interface.
*/
public Response<ScanResult<Tuple>> zscan(final String key, final int cursor) {
throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline");
}
@Override
public Response<Long> zlexcount(String key, String min, String max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrangeByLex(String key, String min, String max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrangeByLex(String key, String min, String max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zremrangeByLex(String key, String start, String end) {
throw new UnsupportedOperationException("not yet implemented");
}
// BITCOUNT: number of set bits in the string value at `key`.
@Override
public Response<Long> bitcount(final String key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.bitcount(key);
}
}.execute(key, OpName.BITCOUNT);
}
// BITCOUNT with an inclusive byte range [start, end] (negative offsets count
// from the end of the string, per Redis semantics).
@Override
public Response<Long> bitcount(final String key, final long start, final long end) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.bitcount(key, start, end);
}
}.execute(key, OpName.BITCOUNT);
}
/**** Binary Operations ****/
// SET for binary keys/values. Also records the client-side enqueue latency
// (time spent queuing the command, not the round trip) in microseconds.
@Override
public Response<String> set(final byte[] key, final byte[] value) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.set(key, value);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.SET.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.SET);
}
// --- HyperLogLog and BITFIELD commands: not yet supported by this pipeline. ---
@Override
public Response<Long> pfadd(String key, String... elements) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pfcount(String key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<Long>> bitfield(String key, String... arguments) {
throw new UnsupportedOperationException("not yet implemented");
}
// HSTRLEN: length of the string stored at `field` within the hash at `key`
// (0 if the key or field does not exist, per Redis semantics).
// Parameters are declared final for consistency with every other implemented
// wrapper in this class, making the anonymous-class capture explicit.
@Override
public Response<Long> hstrlen(final String key, final String field) {
    return new PipelineOperation<Long>() {
        @Override
        Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
            return jedisPipeline.hstrlen(key, field);
        }
    }.execute(key, OpName.HSTRLEN);
}
// --- Persistence/migration and reverse-lex commands: not yet supported. ---
@Override
public Response<byte[]> dump(String key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> restore(String key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> restoreReplace(String key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> migrate(String host, int port, String key, int destinationDB, int timeout) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrevrangeByLex(String key, String max, String min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrevrangeByLex(String key, String max, String min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
// --- Geo commands (String keys): not yet supported by this pipeline. ---
@Override
public Response<Long> geoadd(String arg0, Map<String, GeoCoordinate> arg1) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> geoadd(String arg0, double arg1, double arg2, String arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> geodist(String arg0, String arg1, String arg2) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> geodist(String arg0, String arg1, String arg2, GeoUnit arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<String>> geohash(String arg0, String... arg1) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoCoordinate>> geopos(String arg0, String... arg1) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadius(String arg0, double arg1, double arg2, double arg3,
GeoUnit arg4) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusReadonly(String key, double longitude, double latitude, double radius, GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadius(String arg0, double arg1, double arg2, double arg3, GeoUnit arg4,
GeoRadiusParam arg5) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusReadonly(String key, double longitude, double latitude, double radius, GeoUnit unit, GeoRadiusParam param) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMember(String arg0, String arg1, double arg2, GeoUnit arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMemberReadonly(String key, String member, double radius, GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMember(String arg0, String arg1, double arg2, GeoUnit arg3,
GeoRadiusParam arg4) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMemberReadonly(String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
throw new UnsupportedOperationException("not yet implemented");
}
// The commands below are not yet wired through the Dyno pipeline. They
// previously returned null, which surfaced as a NullPointerException far from
// the call site once the caller dereferenced the Response after sync().
// Throwing UnsupportedOperationException fails fast and matches the
// convention used by every other unimplemented command in this class.
@Override
public Response<Long> bitpos(String key, boolean value) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> bitpos(String key, boolean value, BitPosParams params) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> set(String key, String value, SetParams params) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<String>> srandmember(String key, int count) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(String key, String min, String max) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(String key, String min, String max, int offset, int count) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> objectRefcount(String key) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> objectEncoding(String key) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> objectIdletime(String key) {
    throw new UnsupportedOperationException("not yet implemented");
}
// ZADD with extra flags (e.g. NX/XX/CH) for a batch of member->score entries.
// Parameters are declared final for consistency with the other implemented
// wrappers in this class (they are captured by the anonymous class below).
@Override
public Response<Long> zadd(final String key, final Map<String, Double> members, final ZAddParams params) {
    return new PipelineOperation<Long>() {
        @Override
        Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
            return jedisPipeline.zadd(key, members, params);
        }
    }.execute(key, OpName.ZADD);
}
// ZADD with flags for a single member/score pair.
public Response<Long> zadd(final String key, final double score, final String member, final ZAddParams params) {
    return new PipelineOperation<Long>() {
        @Override
        Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
            return jedisPipeline.zadd(key, score, member, params);
        }
    }.execute(key, OpName.ZADD);
}
// ZINCRBY with flags: not yet supported by this pipeline.
@Override
public Response<Double> zincrby(String arg0, double arg1, String arg2, ZIncrByParams arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
/**
 * Flushes all queued commands to the server and populates every pending
 * Response. On a connection failure the error is recorded against the host
 * (so the health tracker can react in releaseConnection()) and rethrown.
 * The pipeline is always discarded and the connection returned to the pool,
 * success or failure.
 */
public void sync() {
// Timestamps are in microseconds (nanoTime / 1000) to match the monitors.
long startTime = System.nanoTime() / 1000;
try {
jedisPipeline.sync();
opMonitor.recordPipelineSync();
} catch (JedisConnectionException jce) {
String msg = "Failed sync() to host: " + getHostInfo();
// Stash the failure so releaseConnection() can report it to the health tracker.
pipelineEx.set(new FatalConnectionException(msg, jce).
setHost(connection == null ? Host.NO_HOST : connection.getHost()));
cpMonitor.incOperationFailure(connection == null ? null : connection.getHost(), jce);
throw jce;
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordLatency(duration, TimeUnit.MICROSECONDS);
// Pipeline is single-use: always tear down and give the connection back.
discardPipeline(false);
releaseConnection();
}
}
/**
 * Same contract as {@link #sync()} but also returns the raw results of all
 * queued commands, in submission order. Always discards the pipeline and
 * releases the connection, success or failure.
 */
public List<Object> syncAndReturnAll() {
long startTime = System.nanoTime() / 1000;
try {
List<Object> result = jedisPipeline.syncAndReturnAll();
opMonitor.recordPipelineSync();
return result;
} catch (JedisConnectionException jce) {
String msg = "Failed syncAndReturnAll() to host: " + getHostInfo();
// Record the failure for the health tracker (consumed in releaseConnection()).
pipelineEx.set(new FatalConnectionException(msg, jce).
setHost(connection == null ? Host.NO_HOST : connection.getHost()));
cpMonitor.incOperationFailure(connection == null ? null : connection.getHost(), jce);
throw jce;
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordLatency(duration, TimeUnit.MICROSECONDS);
discardPipeline(false);
releaseConnection();
}
}
/**
 * Tears down the underlying Jedis pipeline. Jedis has no explicit discard,
 * so any still-queued commands are drained via sync() before the reference
 * is dropped. Failures are logged and swallowed — this is best-effort
 * cleanup and must not mask the original operation's outcome.
 *
 * @param recordLatency whether to record the drain time in the op monitor
 *                      (false when called from sync(), which already timed it)
 */
private void discardPipeline(boolean recordLatency) {
try {
if (jedisPipeline != null) {
long startTime = System.nanoTime() / 1000;
jedisPipeline.sync();
if (recordLatency) {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordLatency(duration, TimeUnit.MICROSECONDS);
}
jedisPipeline = null;
}
} catch (Exception e) {
Logger.warn(String.format("Failed to discard jedis pipeline, %s", getHostInfo()), e);
}
}
/**
 * Returns the checked-out connection to its pool and clears local state.
 * If a pipeline failure was recorded (see sync()/syncAndReturnAll()), it is
 * reported to the pool's health tracker here, then cleared. Failures during
 * release are logged and swallowed so cleanup never throws.
 */
private void releaseConnection() {
if (connection != null) {
try {
connection.getContext().reset();
connection.getParentConnectionPool().returnConnection(connection);
if (pipelineEx.get() != null) {
connPool.getHealthTracker().trackConnectionError(connection.getParentConnectionPool(),
pipelineEx.get());
pipelineEx.set(null);
}
connection = null;
} catch (Exception e) {
Logger.warn(String.format("Failed to return connection in Dyno Jedis Pipeline, %s", getHostInfo()), e);
}
}
}
/**
 * Abandons the pipeline (recording a discard metric) and returns the
 * connection to the pool. Safe to call even if nothing was queued.
 */
public void discardPipelineAndReleaseConnection() {
opMonitor.recordPipelineDiscard();
discardPipeline(true);
releaseConnection();
}
// AutoCloseable support: closing the pipeline discards it and frees the connection.
@Override
public void close() throws Exception {
discardPipelineAndReleaseConnection();
}
/**
 * Best-effort description of the remote host backing the current connection,
 * used in log and error messages. Falls back to "unknown" when no connection
 * (or no host) is available.
 */
private String getHostInfo() {
    final boolean hostKnown = connection != null && connection.getHost() != null;
    return hostKnown ? connection.getHost().toString() : "unknown";
}
// --- Binary (byte[]-keyed) commands below: not yet supported by this pipeline. ---
@Override
public Response<Long> append(byte[] key, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> blpop(byte[] arg) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> brpop(byte[] arg) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> decr(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
// DECRBY for binary keys: decrements the integer value at `key` by `integer`.
@Override
public Response<Long> decrBy(final byte[] key, final long integer) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.decrBy(key, integer);
}
}.execute(key, OpName.DECRBY);
}
// DEL for a binary key: removes the key, returns the number of keys removed.
@Override
public Response<Long> del(final byte[] key) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.del(key);
}
}.execute(key, OpName.DEL);
}
// UNLINK for a binary key: non-blocking delete (reclaimed asynchronously).
@Override
public Response<Long> unlink(byte[] keys) {
return new PipelineOperation<Long>() {
@Override
Response<Long> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.unlink(keys);
}
}.execute(keys, OpName.UNLINK);
}
// ECHO (binary): not yet supported by this pipeline.
@Override
public Response<byte[]> echo(byte[] string) {
throw new UnsupportedOperationException("not yet implemented");
}
// EXISTS for a binary key: true if the key is present.
@Override
public Response<Boolean> exists(final byte[] key) {
return new PipelineOperation<Boolean>() {
@Override
Response<Boolean> execute(final Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.exists(key);
}
}.execute(key, OpName.EXISTS);
}
// --- TTL/expiry commands for binary keys: not yet supported. ---
@Override
public Response<Long> expire(byte[] key, int seconds) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pexpire(byte[] key, long milliseconds) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> expireAt(byte[] key, long unixTime) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pexpireAt(byte[] key, long millisecondsTimestamp) {
throw new UnsupportedOperationException("not yet implemented");
}
// GET for a binary key. Also records the client-side enqueue latency (time to
// queue the command, not the round trip) in microseconds.
@Override
public Response<byte[]> get(final byte[] key) {
return new PipelineOperation<byte[]>() {
@Override
Response<byte[]> execute(Pipeline jedisPipeline) throws DynoException {
long startTime = System.nanoTime() / 1000;
try {
return jedisPipeline.get(key);
} finally {
long duration = System.nanoTime() / 1000 - startTime;
opMonitor.recordSendLatency(OpName.GET.name(), duration, TimeUnit.MICROSECONDS);
}
}
}.execute(key, OpName.GET);
}
// GETBIT (binary): not yet supported by this pipeline.
@Override
public Response<Boolean> getbit(byte[] key, long offset) {
throw new UnsupportedOperationException("not yet implemented");
}
// GETSET for binary keys: atomically sets `value` and returns the old value.
@Override
public Response<byte[]> getSet(final byte[] key, final byte[] value) {
return new PipelineOperation<byte[]>() {
@Override
Response<byte[]> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.getSet(key, value);
}
}.execute(key, OpName.GETSET);
}
// --- Binary string/hash/list/set commands: not yet supported by this pipeline. ---
@Override
public Response<byte[]> getrange(byte[] key, long startOffset, long endOffset) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hdel(byte[] key, byte[]... field) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Boolean> hexists(byte[] key, byte[] field) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hincrBy(byte[] key, byte[] field, long value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> hkeys(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hlen(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hsetnx(byte[] key, byte[] field, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> hvals(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> incr(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> incrBy(byte[] key, long integer) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> lindex(byte[] key, long index) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> linsert(byte[] key, ListPosition where, byte[] pivot, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> llen(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> lpop(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> lpush(byte[] key, byte[]... string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> lpushx(byte[] key, byte[]... bytes) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> lrange(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> lrem(byte[] key, long count, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> lset(byte[] key, long index, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> ltrim(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> move(byte[] key, int dbIndex) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> persist(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> rpop(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> rpush(byte[] key, byte[]... string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> rpushx(byte[] key, byte[]... string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> sadd(byte[] key, byte[]... member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> scard(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Boolean> setbit(byte[] key, long offset, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> setrange(byte[] key, long offset, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
// SETEX for binary keys: sets `value` at `key` with a TTL of `seconds`.
@Override
public Response<String> setex(final byte[] key, final int seconds, final byte[] value) {
return new PipelineOperation<String>() {
@Override
Response<String> execute(Pipeline jedisPipeline) throws DynoException {
return jedisPipeline.setex(key, seconds, value);
}
}.execute(key, OpName.SETEX);
}
// --- Remaining binary (byte[]-keyed) commands: not yet supported by the Dyno
// pipeline; each fails fast with UnsupportedOperationException. ---
@Override
public Response<Long> setnx(byte[] key, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> smembers(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Boolean> sismember(byte[] key, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> sort(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> sort(byte[] key, SortingParams sortingParameters) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> spop(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> spop(byte[] key, long count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> srandmember(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> srem(byte[] key, byte[]... member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> strlen(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> substr(byte[] key, int start, int end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> touch(byte[] keys) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> ttl(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pttl(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> type(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zadd(byte[] key, double score, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zadd(byte[] key, double score, byte[] member, ZAddParams params) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zadd(byte[] key, Map<byte[], Double> scoreMembers) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zadd(byte[] key, Map<byte[], Double> scoreMembers, ZAddParams params) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zcard(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zcount(byte[] key, double min, double max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zcount(byte[] key, byte[] min, byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> zincrby(byte[] key, double score, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> zincrby(byte[] key, double score, byte[] member, ZIncrByParams params) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrange(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrangeByScore(byte[] key, double min, double max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrangeByScore(byte[] key, byte[] min, byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrangeByScore(byte[] key, double min, double max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrangeByScore(byte[] key, byte[] min, byte[] max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, double min, double max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, byte[] min, byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, double min, double max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, byte[] min, byte[] max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrangeByScore(byte[] key, double max, double min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrangeByScore(byte[] key, byte[] max, byte[] min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrangeByScore(byte[] key, double max, double min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrangeByScore(byte[] key, byte[] max, byte[] min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, double max, double min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, byte[] max, byte[] min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, double max, double min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, byte[] max, byte[] min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrangeWithScores(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zrank(byte[] key, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zrem(byte[] key, byte[]... member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zremrangeByRank(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zremrangeByScore(byte[] key, double start, double end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zremrangeByScore(byte[] key, byte[] start, byte[] end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrange(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<Tuple>> zrevrangeWithScores(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zrevrank(byte[] key, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> zscore(byte[] key, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zlexcount(byte[] key, byte[] min, byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrangeByLex(byte[] key, byte[] min, byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrangeByLex(byte[] key, byte[] min, byte[] max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrangeByLex(byte[] key, byte[] max, byte[] min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> zrevrangeByLex(byte[] key, byte[] max, byte[] min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zremrangeByLex(byte[] key, byte[] min, byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> bitcount(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> bitcount(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pfadd(byte[] key, byte[]... elements) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pfcount(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> dump(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> restore(byte[] key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> restoreReplace(byte[] key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> migrate(String host, int port, byte[] key, int destinationDB, int timeout) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> geoadd(byte[] key, double longitude, double latitude, byte[] member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> geoadd(byte[] key, Map<byte[], GeoCoordinate> memberCoordinateMap) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> geodist(byte[] key, byte[] member1, byte[] member2) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> geodist(byte[] key, byte[] member1, byte[] member2, GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> geohash(byte[] key, byte[]... members) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoCoordinate>> geopos(byte[] key, byte[]... members) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadius(byte[] key, double longitude, double latitude, double radius,
GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusReadonly(byte[] key, double longitude, double latitude, double radius, GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadius(byte[] key, double longitude, double latitude, double radius,
GeoUnit unit, GeoRadiusParam param) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusReadonly(byte[] key, double longitude, double latitude, double radius, GeoUnit unit, GeoRadiusParam param) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMember(byte[] key, byte[] member, double radius, GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMemberReadonly(byte[] key, byte[] member, double radius, GeoUnit unit) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMember(byte[] key, byte[] member, double radius, GeoUnit unit,
GeoRadiusParam param) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMemberReadonly(byte[] key, byte[] member, double radius, GeoUnit unit, GeoRadiusParam param) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<Long>> bitfield(byte[] key, byte[]... elements) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hstrlen(byte[] key, byte[] field) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> bitpos(byte[] key, boolean value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> bitpos(byte[] key, boolean value, BitPosParams params) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> set(byte[] key, byte[] value, SetParams params) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> srandmember(byte[] key, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> objectRefcount(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> objectEncoding(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> objectIdletime(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> incrByFloat(byte[] key, double increment) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> psetex(byte[] key, long milliseconds, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> hincrByFloat(byte[] key, byte[] field, double increment) {
throw new UnsupportedOperationException("not yet implemented");
}
}
| 6,120 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoConfigCommand.java |
/*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import lombok.Getter;
import redis.clients.jedis.commands.ProtocolCommand;
import redis.clients.jedis.util.SafeEncoder;
/**
 * Dynomite-specific configuration commands sent over the Redis protocol.
 * Each constant carries the raw wire bytes of its command name, exposed via
 * Lombok's {@code @Getter} to satisfy {@code ProtocolCommand#getRaw()}.
 */
@Getter
public enum DynoConfigCommand implements ProtocolCommand {
// Connection-level consistency setting (presumably read/written on the
// Dynomite node — confirm exact semantics against Dynomite docs).
CONN_CONSISTENCY("DYNO_CONFIG:CONN_CONSISTENCY");
// Encoded command name as sent on the wire.
private final byte[] raw;
DynoConfigCommand(String opName) {
this.raw = SafeEncoder.encode(opName);
}
}
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoDualWriterClient.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import com.netflix.dyno.contrib.DynoOPMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
* Client that provides 'dual-write' functionality. This is useful when clients wish to move from one dynomite
* cluster to another, for example to upgrade cluster capacity.
*
* @author jcacciatore
*/
public class DynoDualWriterClient extends DynoJedisClient {

    private static final Logger logger = LoggerFactory.getLogger(DynoDualWriterClient.class);

    // Shared executor for fire-and-forget shadow-cluster writes; async so shadow
    // latency never affects the primary write path. Made final: it is created once
    // and never reassigned.
    private static final ExecutorService executor =
            Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

    private final String appName;

    private final ConnectionPool<Jedis> connPool;

    // Client used for dual-write functionality.
    private final DynoJedisClient shadowClient;

    // Used to control traffic flow to the dual-write cluster
    private final Dial dial;

    private final AtomicReference<DynoJedisPipelineMonitor> pipelineMonitor = new AtomicReference<>();

    /**
     * Creates a dual-writer with the default {@link TimestampDial}, admitting the
     * configured dual-write percentage of requests to the shadow cluster.
     */
    public DynoDualWriterClient(String name, String clusterName,
                                ConnectionPool<Jedis> pool,
                                DynoOPMonitor operationMonitor,
                                ConnectionPoolMonitor connectionPoolMonitor,
                                DynoJedisClient shadowClient) {
        this(name, clusterName, pool, operationMonitor, connectionPoolMonitor, shadowClient,
                new TimestampDial(pool.getConfiguration().getDualWritePercentage()));
    }

    /**
     * Creates a dual-writer with a caller-supplied {@link Dial} that decides which
     * keys are shadowed.
     */
    public DynoDualWriterClient(String name, String clusterName,
                                ConnectionPool<Jedis> pool,
                                DynoOPMonitor operationMonitor,
                                ConnectionPoolMonitor connectionPoolMonitor,
                                DynoJedisClient shadowClient,
                                Dial dial) {
        super(name, clusterName, pool, operationMonitor, connectionPoolMonitor);
        this.appName = name;
        this.connPool = pool;
        this.shadowClient = shadowClient;
        this.dial = dial;
    }

    public Dial getDial() {
        return dial;
    }

    /** Lazily creates the pipeline monitor exactly once (CAS-guarded) and returns it. */
    private DynoJedisPipelineMonitor checkAndInitPipelineMonitor() {
        if (pipelineMonitor.get() != null) {
            return pipelineMonitor.get();
        }

        int flushTimerFrequency = this.connPool.getConfiguration().getTimingCountersResetFrequencySeconds();
        DynoJedisPipelineMonitor plMonitor = new DynoJedisPipelineMonitor(appName, flushTimerFrequency);
        if (pipelineMonitor.compareAndSet(null, plMonitor)) {
            // Only the CAS winner initializes the monitor, so init() runs once.
            pipelineMonitor.get().init();
        }
        return pipelineMonitor.get();
    }

    @Override
    public DynoDualWriterPipeline pipelined() {
        return new DynoDualWriterPipeline(appName, getConnPool(), checkAndInitPipelineMonitor(),
                getConnPool().getMonitor(), shadowClient.getConnPool(), dial);
    }

    /**
     * Submits the given shadow-cluster write asynchronously when the key is admitted
     * by the dial and the shadow pool is usable.
     *
     * @return the pending shadow write, or {@code null} if the request was not shadowed
     */
    private <R> Future<OperationResult<R>> writeAsync(final String key, Callable<OperationResult<R>> func) {
        if (sendShadowRequest(key)) {
            try {
                return executor.submit(func);
            } catch (Throwable th) {
                opMonitor.recordFailure("shadowPool_submit", th.getMessage());
            }
            // if we need to do any other processing (logging, etc) now's the time...
        }
        return null;
    }

    /**
     * writeAsync() for binary commands.
     */
    private <R> Future<OperationResult<R>> writeAsync(final byte[] key, Callable<OperationResult<R>> func) {
        if (sendShadowRequest(key)) {
            try {
                return executor.submit(func);
            } catch (Throwable th) {
                opMonitor.recordFailure("shadowPool_submit", th.getMessage());
            }
            // if we need to do any other processing (logging, etc) now's the time...
        }
        return null;
    }

    /**
     * Returns true if the connection pool
     * <li>Is NOT idle</li>
     * <li>Has active pools (the shadow cluster may disappear at any time and we don't want to bloat logs)</li>
     * <li>The key is in range in the dial</li>
     * <p>
     * The idle check is necessary since there may be active host pools however the shadow client may not be able to
     * connect to them, for example, if security groups are not configured properly.
     */
    private boolean sendShadowRequest(String key) {
        return this.getConnPool().getConfiguration().isDualWriteEnabled()
                && !this.getConnPool().isIdle()
                && this.getConnPool().getActivePools().size() > 0
                && dial.isInRange(key);
    }

    private boolean sendShadowRequest(byte[] key) {
        return this.getConnPool().getConfiguration().isDualWriteEnabled()
                && !this.getConnPool().isIdle()
                && this.getConnPool().getActivePools().size() > 0
                && dial.isInRange(key);
    }

    public interface Dial {
        /**
         * Returns true if the given value is in range, false otherwise
         */
        boolean isInRange(String key);

        boolean isInRange(byte[] key);

        void setRange(int range);
    }

    /**
     * Default Dial implementation that presumes no knowledge of the key value
     * and simply uses a timestamp to determine inclusion/exclusion
     */
    private static class TimestampDial implements Dial {

        private final AtomicInteger range = new AtomicInteger(1);

        public TimestampDial(int range) {
            this.range.set(range);
        }

        @Override
        public boolean isInRange(String key) {
            return range.get() > (System.currentTimeMillis() % 100);
        }

        @Override
        public boolean isInRange(byte[] key) {
            return range.get() > (System.currentTimeMillis() % 100);
        }

        @Override
        public void setRange(int range) {
            this.range.set(range);
        }
    }

    //----------------------------- JEDIS COMMANDS --------------------------------------

    @Override
    public Long append(final String key, final String value) {
        return this.d_append(key, value).getResult();
    }

    @Override
    public OperationResult<Long> d_append(final String key, final String value) {
        writeAsync(key, () -> shadowClient.d_append(key, value));
        return super.d_append(key, value);
    }

    @Override
    public OperationResult<String> d_hmset(final String key, final Map<String, String> hash) {
        writeAsync(key, () -> shadowClient.d_hmset(key, hash));
        return super.d_hmset(key, hash);
    }

    @Override
    public Long sadd(final String key, final String... members) {
        return this.d_sadd(key, members).getResult();
    }

    @Override
    public OperationResult<Long> d_sadd(final String key, final String... members) {
        writeAsync(key, () -> shadowClient.d_sadd(key, members));
        return super.d_sadd(key, members);
    }

    @Override
    public Long hset(final String key, final String field, final String value) {
        return this.d_hset(key, field, value).getResult();
    }

    @Override
    public OperationResult<Long> d_hset(final String key, final String field, final String value) {
        writeAsync(key, () -> shadowClient.d_hset(key, field, value));
        return super.d_hset(key, field, value);
    }

    @Override
    public String set(final String key, final String value) {
        return this.d_set(key, value).getResult();
    }

    @Override
    public OperationResult<String> d_set(final String key, final String value) {
        writeAsync(key, () -> shadowClient.d_set(key, value));
        return super.d_set(key, value);
    }

    @Override
    public String setex(final String key, int seconds, String value) {
        return this.d_setex(key, seconds, value).getResult();
    }

    @Override
    public OperationResult<String> d_setex(final String key, final Integer seconds, final String value) {
        writeAsync(key, () -> shadowClient.d_setex(key, seconds, value));
        return super.d_setex(key, seconds, value);
    }

    @Override
    public Long del(final String key) {
        writeAsync(key, () -> shadowClient.d_del(key));
        return super.del(key);
    }

    @Override
    public Long expire(final String key, final int seconds) {
        writeAsync(key, () -> shadowClient.d_expire(key, seconds));
        return super.expire(key, seconds);
    }

    @Override
    public Long expireAt(final String key, final long unixTime) {
        writeAsync(key, () -> shadowClient.d_expireAt(key, unixTime));
        return super.expireAt(key, unixTime);
    }

    @Override
    public String getSet(final String key, final String value) {
        writeAsync(key, () -> shadowClient.d_getSet(key, value));
        return super.getSet(key, value);
    }

    @Override
    public Long hdel(final String key, final String... fields) {
        writeAsync(key, () -> shadowClient.d_hdel(key, fields));
        // FIX: previously called super.hdel(key) without the fields, so the
        // primary cluster received an HDEL with no fields to remove.
        return super.hdel(key, fields);
    }

    @Override
    public Long hincrBy(final String key, final String field, final long value) {
        writeAsync(key, () -> shadowClient.d_hincrBy(key, field, value));
        return super.hincrBy(key, field, value);
    }

    @Override
    public Double hincrByFloat(final String key, final String field, final double value) {
        writeAsync(key, () -> shadowClient.d_hincrByFloat(key, field, value));
        return super.hincrByFloat(key, field, value);
    }

    @Override
    public Long hsetnx(final String key, final String field, final String value) {
        writeAsync(key, () -> shadowClient.d_hsetnx(key, field, value));
        return super.hsetnx(key, field, value);
    }

    @Override
    public Long incr(final String key) {
        writeAsync(key, () -> shadowClient.d_incr(key));
        return super.incr(key);
    }

    @Override
    public Long incrBy(final String key, final long delta) {
        writeAsync(key, () -> shadowClient.d_incrBy(key, delta));
        return super.incrBy(key, delta);
    }

    @Override
    public Double incrByFloat(final String key, final double increment) {
        writeAsync(key, () -> shadowClient.d_incrByFloat(key, increment));
        return super.incrByFloat(key, increment);
    }

    @Override
    public String lpop(final String key) {
        writeAsync(key, () -> shadowClient.d_lpop(key));
        return super.lpop(key);
    }

    @Override
    public Long lpush(final String key, final String... values) {
        writeAsync(key, () -> shadowClient.d_lpush(key, values));
        return super.lpush(key, values);
    }

    @Override
    public Long lrem(final String key, final long count, final String value) {
        writeAsync(key, () -> shadowClient.d_lrem(key, count, value));
        return super.lrem(key, count, value);
    }

    @Override
    public String lset(final String key, final long count, final String value) {
        writeAsync(key, () -> shadowClient.d_lset(key, count, value));
        return super.lset(key, count, value);
    }

    @Override
    public String ltrim(final String key, final long start, final long end) {
        writeAsync(key, () -> shadowClient.d_ltrim(key, start, end));
        return super.ltrim(key, start, end);
    }

    @Override
    public Long persist(final String key) {
        writeAsync(key, () -> shadowClient.d_persist(key));
        return super.persist(key);
    }

    @Override
    public Long pexpireAt(final String key, final long millisecondsTimestamp) {
        writeAsync(key, () -> shadowClient.d_pexpireAt(key, millisecondsTimestamp));
        return super.pexpireAt(key, millisecondsTimestamp);
    }

    @Override
    public Long pttl(final String key) {
        writeAsync(key, () -> shadowClient.d_pttl(key));
        return super.pttl(key);
    }

    @Override
    public String rename(final String oldkey, final String newkey) {
        // FIX: both the shadow and the primary rename previously passed oldkey as
        // the destination, renaming the key to itself and dropping newkey entirely.
        writeAsync(oldkey, () -> shadowClient.d_rename(oldkey, newkey));
        return super.rename(oldkey, newkey);
    }

    @Override
    public String rpop(final String key) {
        writeAsync(key, () -> shadowClient.d_rpop(key));
        return super.rpop(key);
    }

    @Override
    public Long scard(final String key) {
        writeAsync(key, () -> shadowClient.d_scard(key));
        return super.scard(key);
    }

    @Override
    public Boolean setbit(final String key, final long offset, final boolean value) {
        writeAsync(key, () -> shadowClient.d_setbit(key, offset, value));
        return super.setbit(key, offset, value);
    }

    @Override
    public Boolean setbit(final String key, final long offset, final String value) {
        writeAsync(key, () -> shadowClient.d_setbit(key, offset, value));
        return super.setbit(key, offset, value);
    }

    @Override
    public Long setnx(final String key, final String value) {
        writeAsync(key, () -> shadowClient.d_setnx(key, value));
        return super.setnx(key, value);
    }

    @Override
    public Long setrange(final String key, final long offset, final String value) {
        writeAsync(key, () -> shadowClient.d_setrange(key, offset, value));
        return super.setrange(key, offset, value);
    }

    @Override
    public Set<String> smembers(final String key) {
        writeAsync(key, () -> shadowClient.d_smembers(key));
        return super.smembers(key);
    }

    @Override
    public Long smove(final String srckey, final String dstkey, final String member) {
        writeAsync(srckey, () -> shadowClient.d_smove(srckey, dstkey, member));
        return super.smove(srckey, dstkey, member);
    }

    @Override
    public List<String> sort(final String key) {
        writeAsync(key, () -> shadowClient.d_sort(key));
        return super.sort(key);
    }

    @Override
    public String spop(final String key) {
        writeAsync(key, () -> shadowClient.d_spop(key));
        return super.spop(key);
    }

    @Override
    public Long srem(final String key, final String... members) {
        writeAsync(key, () -> shadowClient.d_srem(key, members));
        return super.srem(key, members);
    }

    @Override
    public ScanResult<String> sscan(final String key, final String cursor) {
        writeAsync(key, () -> shadowClient.d_sscan(key, cursor));
        return super.sscan(key, cursor);
    }

    @Override
    public ScanResult<String> sscan(final String key, final String cursor, final ScanParams params) {
        writeAsync(key, () -> shadowClient.d_sscan(key, cursor, params));
        return super.sscan(key, cursor, params);
    }

    @Override
    public Long ttl(final String key) {
        writeAsync(key, () -> shadowClient.d_ttl(key));
        return super.ttl(key);
    }

    @Override
    public Long zadd(final String key, final double score, final String member) {
        writeAsync(key, () -> shadowClient.d_zadd(key, score, member));
        return super.zadd(key, score, member);
    }

    @Override
    public Long zadd(final String key, final Map<String, Double> scoreMembers) {
        writeAsync(key, () -> shadowClient.d_zadd(key, scoreMembers));
        return super.zadd(key, scoreMembers);
    }

    @Override
    public Double zincrby(final String key, final double score, final String member) {
        writeAsync(key, () -> shadowClient.d_zincrby(key, score, member));
        return super.zincrby(key, score, member);
    }

    @Override
    public Long zrem(final String key, final String... member) {
        writeAsync(key, () -> shadowClient.d_zrem(key, member));
        return super.zrem(key, member);
    }

    @Override
    public List<String> blpop(final int timeout, final String key) {
        writeAsync(key, () -> shadowClient.d_blpop(timeout, key));
        return super.blpop(timeout, key);
    }

    @Override
    public List<String> brpop(final int timeout, final String key) {
        writeAsync(key, () -> shadowClient.d_brpop(timeout, key));
        return super.brpop(timeout, key);
    }

    /******************* Jedis Dual write for binary commands **************/

    @Override
    public String set(final byte[] key, final byte[] value) {
        writeAsync(key, () -> shadowClient.d_set(key, value));
        return super.set(key, value);
    }

    @Override
    public String setex(final byte[] key, final int seconds, final byte[] value) {
        writeAsync(key, () -> shadowClient.d_setex(key, seconds, value));
        return super.setex(key, seconds, value);
    }
}
| 6,122 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoJedisClient.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.EurekaClient;
import com.netflix.dyno.connectionpool.CompressionOperation;
import com.netflix.dyno.connectionpool.ConnectionContext;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.CursorBasedResult;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.MultiKeyCompressionOperation;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.TokenRackMapper;
import com.netflix.dyno.connectionpool.TopologyView;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.connectionpool.impl.utils.ZipUtils;
import com.netflix.dyno.contrib.ArchaiusConnectionPoolConfiguration;
import com.netflix.dyno.contrib.DynoCPMonitor;
import com.netflix.dyno.contrib.DynoOPMonitor;
import com.netflix.dyno.contrib.EurekaHostsSupplier;
import com.netflix.dyno.jedis.operation.BaseKeyOperation;
import com.netflix.dyno.jedis.operation.MultiKeyOperation;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import redis.clients.jedis.*;
import redis.clients.jedis.commands.BinaryJedisCommands;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.commands.MultiKeyBinaryCommands;
import redis.clients.jedis.commands.MultiKeyCommands;
import redis.clients.jedis.commands.ScriptingCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
import javax.net.ssl.SSLSocketFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import static com.netflix.dyno.connectionpool.ConnectionPoolConfiguration.CompressionStrategy;
public class DynoJedisClient implements JedisCommands, BinaryJedisCommands, MultiKeyCommands,
ScriptingCommands, MultiKeyBinaryCommands, DynoJedisCommands {
private static final Logger Logger = org.slf4j.LoggerFactory.getLogger(DynoJedisClient.class);
private static final String DYNO_EXIPREHASH_METADATA_KEYPREFIX = "_metadata:";
private final String appName;
private final String clusterName;
private final ConnectionPool<Jedis> connPool;
private final AtomicReference<DynoJedisPipelineMonitor> pipelineMonitor = new AtomicReference<DynoJedisPipelineMonitor>();
protected final DynoOPMonitor opMonitor;
protected final ConnectionPoolMonitor cpMonitor;
    /**
     * Creates a client bound to an already-constructed connection pool.
     *
     * @param name             application name, used for monitoring
     * @param clusterName      name of the dynomite cluster this client targets
     * @param pool             connection pool that executes every operation
     * @param operationMonitor per-operation metrics sink
     * @param cpMonitor        connection-pool-level metrics sink
     */
    public DynoJedisClient(String name, String clusterName, ConnectionPool<Jedis> pool, DynoOPMonitor operationMonitor,
                           ConnectionPoolMonitor cpMonitor) {
        this.appName = name;
        this.clusterName = clusterName;
        this.connPool = pool;
        this.opMonitor = operationMonitor;
        this.cpMonitor = cpMonitor;
    }
    /**
     * Returns the underlying pool downcast to its concrete implementation.
     * Assumes the pool passed at construction is a ConnectionPoolImpl — TODO confirm
     * against all construction paths.
     */
    public ConnectionPoolImpl<Jedis> getConnPool() {
        return (ConnectionPoolImpl<Jedis>) connPool;
    }
    /** Returns the application name this client was constructed with. */
    public String getApplicationName() {
        return appName;
    }
    /** Returns the dynomite cluster name this client was constructed with. */
    public String getClusterName() {
        return clusterName;
    }
/**
* The following commands are supported
*
* <ul>
* <lh>String Operations</lh>
* <li>{@link #get(String) GET}</li>
* <li>{@link #getSet(String, String) GETSET}</li>
* <li>{@link #set(String, String) SET}</li>
* <li>{@link #setex(String, int, String) SETEX}</li>
* <li>{@link #psetex(String, long, String) PSETEX)</li>
* </ul>
* <ul>
* <lh>Hash Operations</lh>
* <li>{@link #hget(String, String) HGET}</li>
* <li>{@link #hgetAll(String) HGETALL}</li>
* <li>{@link #hmget(String, String...) HMGET}</li>
* <li>{@link #hmset(String, Map) HMSET}</li>
* <li>{@link #hscan(String, String) HSCAN}</li>
* <li>{@link #hset(String, String, String) HSET}</li>
* <li>{@link #hsetnx(String, String, String) HSETNX}</li>
* <li>{@link #hvals(String) HVALS}</li>
* </ul>
*
* <ul>
* <li>{@link #get(byte[]) GET}</li>
* <li>{@link #set(byte[], byte[]) SET}</li>
* <li>{@link #setex(byte[], int, byte[]) SETEX}</li>
* </ul>
*
* @param <T> the parameterized type
*/
    /**
     * Single-key operation that transparently compresses/decompresses values
     * using {@link ZipUtils} base64 zip encoding. Compression and decompression
     * events are recorded on the connection context metadata so callers can
     * observe them. On any IO failure the value is passed through unchanged
     * (best-effort behavior).
     */
    private abstract class CompressionValueOperation<T> extends BaseKeyOperation<T>
            implements CompressionOperation<Jedis, T> {

        private CompressionValueOperation(String k, OpName o) {
            super(k, o);
        }

        /**
         * Compresses the value based on the threshold defined by
         * {@link ConnectionPoolConfiguration#getValueCompressionThreshold()}
         *
         * @param value the raw value to (possibly) compress
         * @return the compressed value, or the original value if it is below the
         *         threshold or compression fails
         */
        @Override
        public String compressValue(String value, ConnectionContext ctx) {
            String result = value;
            int thresholdBytes = connPool.getConfiguration().getValueCompressionThreshold();

            try {
                // prefer speed over accuracy here so rather than using
                // getBytes() to get the actual size
                // just estimate using 2 bytes per character
                if ((2 * value.length()) > thresholdBytes) {
                    result = ZipUtils.compressStringToBase64String(value);
                    ctx.setMetadata("compression", true);
                }
            } catch (IOException e) {
                // Best-effort: log and send the value uncompressed.
                Logger.warn(
                        "UNABLE to compress [" + value + "] for key [" + getStringKey() + "]; sending value uncompressed");
            }

            return result;
        }

        /**
         * Decompresses the value if it carries the compression marker; otherwise
         * (or on failure) returns it unchanged.
         */
        @Override
        public String decompressValue(String value, ConnectionContext ctx) {
            try {
                if (ZipUtils.isCompressed(value)) {
                    ctx.setMetadata("decompression", true);
                    return ZipUtils.decompressFromBase64String(value);
                }
            } catch (IOException e) {
                // Best-effort: log and return the raw value.
                Logger.warn("Unable to decompress value [" + value + "]");
            }

            return value;
        }
    }
/**
* The following commands are supported
*
* <ul>
* <lh>String Operations</lh>
* <li>{@link #mget(String...) MGET}</li>
* <li>{@link #mset(String...) MSET}</li>
* <li>{@link #msetnx(String...) MSETNX}</li>
* </ul>
*
* @param <T> the parameterized type
*/
private abstract class CompressionValueMultiKeyOperation<T> extends MultiKeyOperation<T>
implements MultiKeyCompressionOperation<Jedis, T> {
private CompressionValueMultiKeyOperation(List keys, OpName o) {
super(keys, o);
}
/**
* Accepts a set of keys and values and compresses the value based on the threshold defined by
* {@link ConnectionPoolConfiguration#getValueCompressionThreshold()}
*
* @param ctx and keyValues
* @return
*/
@Override
public String[] compressMultiKeyValue(ConnectionContext ctx, String... keyValues) {
List<String> items = Arrays.asList(keyValues);
List<String> newItems = new ArrayList<String>();
for (int i = 0; i < items.size(); i++) {
/*
* String... keyValues is a List of keys and values.
* The value always comes second and this is the one
* we want to compress.
*/
if (i % 2 == 0) {
String value = items.get(i);
try {
if ((2 * value.length()) > connPool.getConfiguration().getValueCompressionThreshold()) {
newItems.add(i, ZipUtils.compressStringToBase64String(value));
ctx.setMetadata("compression", true);
}
} catch (IOException e) {
Logger.warn(
"UNABLE to compress [" + value + "] for key [" + getStringKey() + "]; sending value uncompressed");
}
} else {
newItems.add(items.get(i));
}
}
return (String[]) newItems.toArray();
}
@Override
public String decompressValue(ConnectionContext ctx, String value) {
try {
if (ZipUtils.isCompressed(value)) {
ctx.setMetadata("decompression", true);
return ZipUtils.decompressFromBase64String(value);
}
} catch (IOException e) {
Logger.warn("Unable to decompress value [" + value + "]");
}
return value;
}
}
    /** Exposes the cluster topology view backed by the concrete connection pool. */
    public TopologyView getTopologyView() {
        return this.getConnPool();
    }
    /**
     * Executes an arbitrary caller-supplied operation (e.g. a Redis module command)
     * through the pool's failover machinery.
     */
    public <R> OperationResult<R> moduleCommand(JedisGenericOperation<R> handler) {
        return connPool.executeWithFailover(handler);
    }
@Override
public Long append(final String key, final String value) {
return d_append(key, value).getResult();
}
    /** APPEND executed through the pool with failover; returns the wrapped result. */
    public OperationResult<Long> d_append(final String key, final String value) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.APPEND) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.append(key, value);
            }
        });
    }
@Override
public Long decr(final String key) {
return d_decr(key).getResult();
}
    /** DECR executed through the pool with failover; returns the wrapped result. */
    public OperationResult<Long> d_decr(final String key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.DECR) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.decr(key);
            }
        });
    }
@Override
public Long decrBy(final String key, final long delta) {
return d_decrBy(key, delta).getResult();
}
    /** DECRBY executed through the pool with failover; returns the wrapped result. */
    public OperationResult<Long> d_decrBy(final String key, final Long delta) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.DECRBY) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.decrBy(key, delta);
            }
        });
    }
/** Redis DEL on {@code key}; delegates to {@link #d_del} and unwraps the result. */
@Override
public Long del(final String key) {
return d_del(key).getResult();
}
/** Redis UNLINK on {@code key}; delegates to {@link #d_unlink} and unwraps the result. */
@Override
public Long unlink(String key) {
return d_unlink(key).getResult();
}
/** UNLINK executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_unlink(String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.UNLINK) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.unlink(key);
}
});
}
/** DEL executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_del(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.DEL) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.del(key);
}
});
}
/** Redis DUMP on {@code key}; delegates to {@link #d_dump} and unwraps the result. */
public byte[] dump(final String key) {
return d_dump(key).getResult();
}
/**
 * Not implemented for this interface signature.
 * NOTE(review): the {@code restore(String, Integer, byte[])} overload further down does
 * implement RESTORE — callers can use that instead.
 */
@Override
public String restore(String key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException("not yet implemented");
}
/** Not implemented: RESTORE with REPLACE is unsupported by this client. */
@Override
public String restoreReplace(String key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException("not yet implemented");
}
/** DUMP executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<byte[]> d_dump(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<byte[]>(key, OpName.DUMP) {
@Override
public byte[] execute(Jedis client, ConnectionContext state) {
return client.dump(key);
}
});
}
/** Redis EXISTS on {@code key}; delegates to {@link #d_exists} and unwraps the result. */
@Override
public Boolean exists(final String key) {
return d_exists(key).getResult();
}
/** EXISTS executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Boolean> d_exists(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.EXISTS) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) {
return client.exists(key);
}
});
}
/** Redis EXPIRE on {@code key}; delegates to {@link #d_expire} and unwraps the result. */
@Override
public Long expire(final String key, final int seconds) {
return d_expire(key, seconds).getResult();
}
/** EXPIRE executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_expire(final String key, final int seconds) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.EXPIRE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.expire(key, seconds);
}
});
}
/** Redis EXPIREAT on {@code key}; delegates to {@link #d_expireAt} and unwraps the result. */
@Override
public Long expireAt(final String key, final long unixTime) {
return d_expireAt(key, unixTime).getResult();
}
/** EXPIREAT executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_expireAt(final String key, final long unixTime) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.EXPIREAT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.expireAt(key, unixTime);
}
});
}
/** Redis EVAL; delegates to {@link #d_eval} (which requires at least one key) and unwraps. */
@Override
public Object eval(String script, int keyCount, String... params) {
return d_eval(script, keyCount, params).getResult();
}
/**
 * EVAL routed by the first key in {@code params} — the operation is keyed on
 * {@code params[0]}, which is why at least one key is mandatory.
 *
 * @throws DynoException if {@code keyCount} is 0 (no key available for routing)
 */
public OperationResult<Object> d_eval(final String script, final int keyCount, final String... params) {
if (keyCount == 0) {
throw new DynoException("Need at least one key in script");
}
return connPool.executeWithFailover(new BaseKeyOperation<Object>(params[0], OpName.EVAL) {
@Override
public Object execute(Jedis client, ConnectionContext state) {
return client.eval(script, keyCount, params);
}
});
}
/** EVAL with separate key/arg lists; flattens them into the varargs form (keys first). */
@Override
public Object eval(String script, List<String> keys, List<String> args) {
String[] params = ArrayUtils.addAll(keys.toArray(new String[0]), args.toArray(new String[0]));
return eval(script, keys.size(), params);
}
/**
 * NOTE(review): delegates with keyCount 0, which {@link #d_eval} rejects — so this
 * overload always throws DynoException. Confirm whether that is intended.
 */
@Override
public Object eval(String script) {
return eval(script, 0);
}
/** Redis EVALSHA; delegates to {@link #d_evalsha} (which requires at least one key) and unwraps. */
@Override
public Object evalsha(String sha1, int keyCount, String... params) {
return d_evalsha(sha1, keyCount, params).getResult();
}
/**
 * EVALSHA routed by the first key in {@code params} (the operation is keyed on
 * {@code params[0]}).
 *
 * @throws DynoException if {@code keyCount} is 0 (no key available for routing)
 */
public OperationResult<Object> d_evalsha(String sha1, int keyCount, String... params) {
if (keyCount == 0) {
throw new DynoException("Need at least one key in script");
}
return connPool.executeWithFailover(new BaseKeyOperation<Object>(params[0], OpName.EVALSHA) {
@Override
public Object execute(Jedis client, ConnectionContext state) {
return client.evalsha(sha1, keyCount, params);
}
});
}
/** EVALSHA with separate key/arg lists; flattens them into the varargs form (keys first). */
@Override
public Object evalsha(String sha1, List<String> keys, List<String> args) {
String[] params = ArrayUtils.addAll(keys.toArray(new String[0]), args.toArray(new String[0]));
return evalsha(sha1, keys.size(), params);
}
/**
 * NOTE(review): delegates with keyCount 0, which {@link #d_evalsha} rejects — so this
 * overload always throws DynoException. Confirm whether that is intended.
 */
@Override
public Object evalsha(String sha1) {
return evalsha(sha1, 0);
}
/** Redis SCRIPT EXISTS for one digest; delegates to {@link #d_scriptExists} and unwraps. */
@Override
public Boolean scriptExists(String sha1) {
return d_scriptExists(sha1).getResult();
}
/** SCRIPT EXISTS executed through the pool with failover; the digest is used as the routing key. */
public OperationResult<Boolean> d_scriptExists(String sha1) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(sha1, OpName.SCRIPT_EXISTS) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) throws DynoException {
return client.scriptExists(sha1);
}
});
}
/**
 * SCRIPT EXISTS for multiple SHA1 digests.
 * <p>
 * Bug fix: the previous body was {@code return scriptExists(sha1);}, which resolved to
 * this same varargs method and recursed forever (StackOverflowError). Each digest is now
 * checked individually via the single-digest {@link #scriptExists(String)} overload, so
 * each lookup is routed on its own digest.
 *
 * @param sha1 one or more script digests
 * @return a Boolean per digest, in the same order as the input
 */
@Override
public List<Boolean> scriptExists(String... sha1) {
    List<Boolean> results = new ArrayList<>(sha1.length);
    for (String sha : sha1) {
        results.add(scriptExists(sha));
    }
    return results;
}
/** Redis SCRIPT LOAD; delegates to {@link #d_scriptLoad} and unwraps the digest result. */
@Override
public String scriptLoad(String script) {
return d_scriptLoad(script).getResult();
}
/** SCRIPT LOAD executed through the pool with failover; the script text is used as the routing key. */
public OperationResult<String> d_scriptLoad(final String script) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(script, OpName.SCRIPT_LOAD) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.scriptLoad(script);
}
});
}
/** Redis SCRIPT FLUSH; delegates to {@link #d_scriptFlush} and unwraps the result. */
public String scriptFlush() {
return d_scriptFlush().getResult();
}
/** SCRIPT FLUSH executed through the pool with failover; keyless, so routed with an empty key. */
public OperationResult<String> d_scriptFlush() {
return connPool.executeWithFailover(new BaseKeyOperation<String>("", OpName.SCRIPT_FLUSH) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.scriptFlush();
}
});
}
/** Redis SCRIPT KILL; delegates to {@link #d_scriptKill} and unwraps the result. */
public String scriptKill() {
return d_scriptKill().getResult();
}
/** SCRIPT KILL executed through the pool with failover; keyless, so routed with an empty key. */
public OperationResult<String> d_scriptKill() {
return connPool.executeWithFailover(new BaseKeyOperation<String>("", OpName.SCRIPT_KILL) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.scriptKill();
}
});
}
/** Redis GET on {@code key}; delegates to {@link #d_get} and unwraps the result. */
@Override
public String get(final String key) {
return d_get(key).getResult();
}
/**
 * GET executed through the pool with failover. When a compression strategy other
 * than NONE is configured, the stored value is decompressed before it is returned.
 */
public OperationResult<String> d_get(final String key) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.GET) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.get(key);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.GET) {
@Override
public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
return decompressValue(client.get(key), state);
}
});
}
}
/** Redis GETBIT on {@code key}; delegates to {@link #d_getbit} and unwraps the result. */
@Override
public Boolean getbit(final String key, final long offset) {
return d_getbit(key, offset).getResult();
}
/** GETBIT executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Boolean> d_getbit(final String key, final Long offset) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.GETBIT) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) {
return client.getbit(key, offset);
}
});
}
/** Redis GETRANGE on {@code key}; delegates to {@link #d_getrange} and unwraps the result. */
@Override
public String getrange(final String key, final long startOffset, final long endOffset) {
return d_getrange(key, startOffset, endOffset).getResult();
}
/** GETRANGE executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_getrange(final String key, final Long startOffset, final Long endOffset) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.GETRANGE) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.getrange(key, startOffset, endOffset);
}
});
}
/** Redis GETSET on {@code key}; delegates to {@link #d_getSet} and unwraps the result. */
@Override
public String getSet(final String key, final String value) {
return d_getSet(key, value).getResult();
}
/**
 * GETSET executed through the pool with failover. Under a compression strategy the new
 * value is compressed before writing and the previous value decompressed before return.
 */
public OperationResult<String> d_getSet(final String key, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.GETSET) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.getSet(key, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.GETSET) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return decompressValue(client.getSet(key, compressValue(value, state)), state);
}
});
}
}
/** Redis HDEL on {@code key}; delegates to {@link #d_hdel} and unwraps the result. */
@Override
public Long hdel(final String key, final String... fields) {
return d_hdel(key, fields).getResult();
}
/** HDEL executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_hdel(final String key, final String... fields) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HDEL) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.hdel(key, fields);
}
});
}
/** Redis HEXISTS on {@code key}; delegates to {@link #d_hexists} and unwraps the result. */
@Override
public Boolean hexists(final String key, final String field) {
return d_hexists(key, field).getResult();
}
/** HEXISTS executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Boolean> d_hexists(final String key, final String field) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.HEXISTS) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) {
return client.hexists(key, field);
}
});
}
/** Redis HGET on {@code key}; delegates to {@link #d_hget} and unwraps the result. */
@Override
public String hget(final String key, final String field) {
return d_hget(key, field).getResult();
}
/**
 * HGET executed through the pool with failover. Under a compression strategy the
 * stored field value is decompressed before it is returned.
 */
public OperationResult<String> d_hget(final String key, final String field) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.HGET) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.hget(key, field);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.HGET) {
@Override
public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
return decompressValue(client.hget(key, field), state);
}
});
}
}
/** Redis HGETALL on {@code key}; delegates to {@link #d_hgetAll} and unwraps the result. */
@Override
public Map<String, String> hgetAll(final String key) {
return d_hgetAll(key).getResult();
}
/**
 * HGETALL executed through the pool with failover. Under a compression strategy every
 * map value is decompressed in place before the map is returned (keys untouched).
 */
public OperationResult<Map<String, String>> d_hgetAll(final String key) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<Map<String, String>>(key, OpName.HGETALL) {
@Override
public Map<String, String> execute(Jedis client, ConnectionContext state) throws DynoException {
return client.hgetAll(key);
}
});
} else {
return connPool
.executeWithFailover(new CompressionValueOperation<Map<String, String>>(key, OpName.HGETALL) {
@Override
public Map<String, String> execute(final Jedis client, final ConnectionContext state) {
// Transform each entry's value only; field names are never compressed.
return CollectionUtils.transform(client.hgetAll(key),
new CollectionUtils.MapEntryTransform<String, String, String>() {
@Override
public String get(String key, String val) {
return decompressValue(val, state);
}
});
}
});
}
}
/** Redis HINCRBY on {@code key}; delegates to {@link #d_hincrBy} and unwraps the result. */
@Override
public Long hincrBy(final String key, final String field, final long value) {
return d_hincrBy(key, field, value).getResult();
}
/** HINCRBY executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_hincrBy(final String key, final String field, final long value) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HINCRBY) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.hincrBy(key, field, value);
}
});
}
/* not supported by RedisPipeline 2.7.3 */
/** Redis HINCRBYFLOAT on {@code key}; delegates to {@link #d_hincrByFloat} and unwraps. */
public Double hincrByFloat(final String key, final String field, final double value) {
return d_hincrByFloat(key, field, value).getResult();
}
/** HINCRBYFLOAT executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Double> d_hincrByFloat(final String key, final String field, final double value) {
return connPool.executeWithFailover(new BaseKeyOperation<Double>(key, OpName.HINCRBYFLOAT) {
@Override
public Double execute(Jedis client, ConnectionContext state) {
return client.hincrByFloat(key, field, value);
}
});
}
/** Redis HSETNX on {@code key}; delegates to {@link #d_hsetnx} and unwraps the result. */
@Override
public Long hsetnx(final String key, final String field, final String value) {
return d_hsetnx(key, field, value).getResult();
}
/**
 * HSETNX executed through the pool with failover. Under a compression strategy the
 * value is compressed before it is written.
 */
public OperationResult<Long> d_hsetnx(final String key, final String field, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HSETNX) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.hsetnx(key, field, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<Long>(key, OpName.HSETNX) {
@Override
public Long execute(final Jedis client, final ConnectionContext state) throws DynoException {
return client.hsetnx(key, field, compressValue(value, state));
}
});
}
}
/** Redis HKEYS on {@code key}; delegates to {@link #d_hkeys} and unwraps the result. */
@Override
public Set<String> hkeys(final String key) {
return d_hkeys(key).getResult();
}
/** HKEYS executed through the pool with failover; field names are never compressed. */
public OperationResult<Set<String>> d_hkeys(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.HKEYS) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.hkeys(key);
}
});
}
/** Redis HSCAN on {@code key}; delegates to {@link #d_hscan} and unwraps the result. */
@Override
public ScanResult<Map.Entry<String, String>> hscan(final String key, final String cursor) {
return d_hscan(key, cursor).getResult();
}
/**
 * HSCAN executed through the pool with failover: incrementally iterates fields/values of
 * the hash at {@code key} starting from {@code cursor}. Under a compression strategy each
 * entry's value is decompressed in place before the page is returned (field names and the
 * cursor are untouched).
 * <p>
 * Fix: the rebuilt result list used raw {@code new ArrayList(...)}; it is now the
 * properly parameterized diamond form, removing the unchecked-conversion warning.
 */
public OperationResult<ScanResult<Map.Entry<String, String>>> d_hscan(final String key, final String cursor) {
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return connPool.executeWithFailover(
                new BaseKeyOperation<ScanResult<Map.Entry<String, String>>>(key, OpName.HSCAN) {
                    @Override
                    public ScanResult<Map.Entry<String, String>> execute(Jedis client, ConnectionContext state) {
                        return client.hscan(key, cursor);
                    }
                });
    } else {
        return connPool.executeWithFailover(
                new CompressionValueOperation<ScanResult<Map.Entry<String, String>>>(key, OpName.HSCAN) {
                    @Override
                    public ScanResult<Map.Entry<String, String>> execute(final Jedis client,
                                                                         final ConnectionContext state) {
                        // Decompress every value in the page, then repackage under the same cursor.
                        return new ScanResult<>(cursor, new ArrayList<>(CollectionUtils.transform(
                                client.hscan(key, cursor).getResult(),
                                new CollectionUtils.Transform<Map.Entry<String, String>, Map.Entry<String, String>>() {
                                    @Override
                                    public Map.Entry<String, String> get(Map.Entry<String, String> entry) {
                                        entry.setValue(decompressValue(entry.getValue(), state));
                                        return entry;
                                    }
                                })));
                    }
                });
    }
}
/**
 * Resolves the SCAN cursor to use for the host recorded on the connection context.
 * Falls back to "0" (start of a fresh scan) when the context, its host metadata,
 * or the cursor holder is missing.
 */
private String getCursorValue(final ConnectionContext state, final CursorBasedResult cursor) {
    if (cursor == null || state == null || state.getMetadata("host") == null) {
        return "0";
    }
    return cursor.getCursorForHost(state.getMetadata("host").toString());
}
/**
 * Issues SCAN against every node in the ring (scatter/gather), resuming each host from
 * the per-host cursor held by {@code cursor}. Applies the optional MATCH patterns and
 * COUNT hint when patterns are supplied.
 *
 * @throws DynoException if {@code cursor} cannot map tokens to racks
 */
private List<OperationResult<ScanResult<String>>> scatterGatherScan(final CursorBasedResult<String> cursor,
final int count, final String... pattern) {
// The ring walk needs the cursor to track per-rack progress.
if (!(cursor instanceof TokenRackMapper)) {
throw new DynoException("cursor does not implement the TokenRackMapper interface");
}
return new ArrayList<>(connPool.executeWithRing((TokenRackMapper) cursor, new BaseKeyOperation<ScanResult<String>>("SCAN", OpName.SCAN) {
@Override
public ScanResult<String> execute(final Jedis client, final ConnectionContext state) throws DynoException {
if (pattern != null && pattern.length > 0) {
ScanParams sp = new ScanParams().count(count);
for (String s : pattern) {
sp.match(s);
}
return client.scan(getCursorValue(state, cursor), sp);
} else {
return client.scan(getCursorValue(state, cursor));
}
}
}));
}
/** Redis HLEN on {@code key}; delegates to {@link #d_hlen} and unwraps the result. */
@Override
public Long hlen(final String key) {
return d_hlen(key).getResult();
}
/** HLEN executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_hlen(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HLEN) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.hlen(key);
}
});
}
/** Redis HMGET on {@code key}; delegates to {@link #d_hmget} and unwraps the result. */
@Override
public List<String> hmget(final String key, final String... fields) {
return d_hmget(key, fields).getResult();
}
/**
 * HMGET executed through the pool with failover. Under a compression strategy every
 * returned value is decompressed; order matches the requested {@code fields}.
 */
public OperationResult<List<String>> d_hmget(final String key, final String... fields) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.HMGET) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.hmget(key, fields);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<List<String>>(key, OpName.HMGET) {
@Override
public List<String> execute(final Jedis client, final ConnectionContext state) throws DynoException {
return new ArrayList<String>(CollectionUtils.transform(client.hmget(key, fields),
new CollectionUtils.Transform<String, String>() {
@Override
public String get(String s) {
return decompressValue(s, state);
}
}));
}
});
}
}
/** Redis HMSET on {@code key}; delegates to {@link #d_hmset} and unwraps the result. */
@Override
public String hmset(final String key, final Map<String, String> hash) {
return d_hmset(key, hash).getResult();
}
/**
 * HMSET executed through the pool with failover. Under a compression strategy every map
 * value is compressed before writing (field names are never compressed).
 */
public OperationResult<String> d_hmset(final String key, final Map<String, String> hash) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.HMSET) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.hmset(key, hash);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.HMSET) {
@Override
public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
return client.hmset(key, CollectionUtils.transform(hash,
new CollectionUtils.MapEntryTransform<String, String, String>() {
@Override
public String get(String key, String val) {
return compressValue(val, state);
}
}));
}
});
}
}
/** Redis HSET on {@code key}; delegates to {@link #d_hset} and unwraps the result. */
@Override
public Long hset(final String key, final String field, final String value) {
return d_hset(key, field, value).getResult();
}
/** Not implemented: multi-field HSET is unsupported; use {@link #hmset} instead. */
@Override
public Long hset(String key, Map<String, String> hash) {
throw new UnsupportedOperationException("not yet implemented");
}
/**
 * HSET executed through the pool with failover. Under a compression strategy the
 * value is compressed before it is written.
 */
public OperationResult<Long> d_hset(final String key, final String field, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HSET) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.hset(key, field, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<Long>(key, OpName.HSET) {
@Override
public Long execute(final Jedis client, final ConnectionContext state) throws DynoException {
return client.hset(key, field, compressValue(value, state));
}
});
}
}
/** Redis HVALS on {@code key}; delegates to {@link #d_hvals} and unwraps the result. */
@Override
public List<String> hvals(final String key) {
return d_hvals(key).getResult();
}
/**
 * HVALS executed through the pool with failover. Under a compression strategy each
 * returned value is decompressed before the list is handed back.
 */
public OperationResult<List<String>> d_hvals(final String key) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.HVALS) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.hvals(key);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<List<String>>(key, OpName.HVALS) {
@Override
public List<String> execute(final Jedis client, final ConnectionContext state) throws DynoException {
return new ArrayList<String>(CollectionUtils.transform(client.hvals(key),
new CollectionUtils.Transform<String, String>() {
@Override
public String get(String s) {
return decompressValue(s, state);
}
}));
}
});
}
}
/** Redis INCR on {@code key}; delegates to {@link #d_incr} and unwraps the result. */
@Override
public Long incr(final String key) {
return d_incr(key).getResult();
}
/** INCR executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_incr(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.INCR) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.incr(key);
}
});
}
/** Redis INCRBY on {@code key}; delegates to {@link #d_incrBy} and unwraps the result. */
@Override
public Long incrBy(final String key, final long delta) {
return d_incrBy(key, delta).getResult();
}
/** INCRBY executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_incrBy(final String key, final Long delta) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.INCRBY) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.incrBy(key, delta);
}
});
}
/** Redis INCRBYFLOAT on {@code key}; delegates to {@link #d_incrByFloat} and unwraps. */
public Double incrByFloat(final String key, final double increment) {
return d_incrByFloat(key, increment).getResult();
}
/** INCRBYFLOAT executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Double> d_incrByFloat(final String key, final Double increment) {
return connPool.executeWithFailover(new BaseKeyOperation<Double>(key, OpName.INCRBYFLOAT) {
@Override
public Double execute(Jedis client, ConnectionContext state) {
return client.incrByFloat(key, increment);
}
});
}
/** Redis LINDEX on {@code key}; delegates to {@link #d_lindex} and unwraps the result. */
@Override
public String lindex(final String key, final long index) {
return d_lindex(key, index).getResult();
}
/** LINDEX executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_lindex(final String key, final Long index) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.LINDEX) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.lindex(key, index);
}
});
}
/** Redis LINSERT on {@code key}; delegates to {@link #d_linsert} and unwraps the result. */
@Override
public Long linsert(final String key, final ListPosition where, final String pivot, final String value) {
return d_linsert(key, where, pivot, value).getResult();
}
/** LINSERT executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_linsert(final String key, final ListPosition where, final String pivot,
final String value) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.LINSERT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.linsert(key, where, pivot, value);
}
});
}
/** Redis LLEN on {@code key}; delegates to {@link #d_llen} and unwraps the result. */
@Override
public Long llen(final String key) {
return d_llen(key).getResult();
}
/** LLEN executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_llen(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.LLEN) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.llen(key);
}
});
}
/** Redis LPOP on {@code key}; delegates to {@link #d_lpop} and unwraps the result. */
@Override
public String lpop(final String key) {
return d_lpop(key).getResult();
}
/** LPOP executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_lpop(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.LPOP) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.lpop(key);
}
});
}
/** Redis LPUSH on {@code key}; delegates to {@link #d_lpush} and unwraps the result. */
@Override
public Long lpush(final String key, final String... values) {
return d_lpush(key, values).getResult();
}
/** LPUSH executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_lpush(final String key, final String... values) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.LPUSH) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.lpush(key, values);
}
});
}
/** Redis LPUSHX on {@code key}; delegates to {@link #d_lpushx} and unwraps the result. */
@Override
public Long lpushx(final String key, final String... values) {
return d_lpushx(key, values).getResult();
}
/** LPUSHX executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_lpushx(final String key, final String... values) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.LPUSHX) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.lpushx(key, values);
}
});
}
/** Redis LRANGE on {@code key}; delegates to {@link #d_lrange} and unwraps the result. */
@Override
public List<String> lrange(final String key, final long start, final long end) {
return d_lrange(key, start, end).getResult();
}
/** LRANGE executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<List<String>> d_lrange(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.LRANGE) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.lrange(key, start, end);
}
});
}
/** Redis LREM on {@code key}; delegates to {@link #d_lrem} and unwraps the result. */
@Override
public Long lrem(final String key, final long count, final String value) {
return d_lrem(key, count, value).getResult();
}
/** LREM executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_lrem(final String key, final Long count, final String value) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.LREM) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.lrem(key, count, value);
}
});
}
/** Redis LSET on {@code key}; delegates to {@link #d_lset} and unwraps the result. */
@Override
public String lset(final String key, final long index, final String value) {
return d_lset(key, index, value).getResult();
}
/** LSET executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_lset(final String key, final Long index, final String value) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.LSET) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.lset(key, index, value);
}
});
}
/** Redis LTRIM on {@code key}; delegates to {@link #d_ltrim} and unwraps the result. */
@Override
public String ltrim(final String key, final long start, final long end) {
return d_ltrim(key, start, end).getResult();
}
/** LTRIM executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_ltrim(final String key, final long start, final long end) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.LTRIM) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.ltrim(key, start, end);
}
});
}
/** Redis PERSIST on {@code key}; delegates to {@link #d_persist} and unwraps the result. */
@Override
public Long persist(final String key) {
return d_persist(key).getResult();
}
/** PERSIST executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_persist(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.PERSIST) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.persist(key);
}
});
}
/** Redis PEXPIREAT on {@code key}; delegates to {@link #d_pexpireAt} and unwraps the result. */
public Long pexpireAt(final String key, final long millisecondsTimestamp) {
return d_pexpireAt(key, millisecondsTimestamp).getResult();
}
/** PEXPIREAT executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_pexpireAt(final String key, final Long millisecondsTimestamp) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.PEXPIREAT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.pexpireAt(key, millisecondsTimestamp);
}
});
}
/** Redis PTTL on {@code key}; delegates to {@link #d_pttl} and unwraps the result. */
public Long pttl(final String key) {
return d_pttl(key).getResult();
}
/** Not implemented: TOUCH is unsupported by this client. */
@Override
public Long touch(String key) {
throw new UnsupportedOperationException("not yet implemented");
}
/** PTTL executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_pttl(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.PTTL) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.pttl(key);
}
});
}
/** Redis RENAME; delegates to {@link #d_rename} (routed on {@code oldkey}) and unwraps. */
@Override
public String rename(String oldkey, String newkey) {
return d_rename(oldkey, newkey).getResult();
}
/** RENAME executed through the pool with failover; the operation is routed on {@code oldkey}. */
public OperationResult<String> d_rename(final String oldkey, final String newkey) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(oldkey, OpName.RENAME) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.rename(oldkey, newkey);
}
});
}
/** Redis RENAMENX; delegates to {@link #d_renamenx} (routed on {@code oldkey}) and unwraps. */
@Override
public Long renamenx(String oldkey, String newkey) {
return d_renamenx(oldkey, newkey).getResult();
}
/** RENAMENX executed through the pool with failover; the operation is routed on {@code oldkey}. */
public OperationResult<Long> d_renamenx(final String oldkey, final String newkey) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(oldkey, OpName.RENAMENX) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.renamenx(oldkey, newkey);
}
});
}
/** Redis RESTORE on {@code key}; delegates to {@link #d_restore} and unwraps the result. */
public String restore(final String key, final Integer ttl, final byte[] serializedValue) {
return d_restore(key, ttl, serializedValue).getResult();
}
/** RESTORE executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_restore(final String key, final Integer ttl, final byte[] serializedValue) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.RESTORE) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.restore(key, ttl, serializedValue);
}
});
}
/** Redis RPOP on {@code key}; delegates to {@link #d_rpop} and unwraps the result. */
public String rpop(final String key) {
return d_rpop(key).getResult();
}
/** RPOP executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<String> d_rpop(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.RPOP) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.rpop(key);
}
});
}
/** Redis RPOPLPUSH; delegates to {@link #d_rpoplpush} (routed on {@code srckey}) and unwraps. */
public String rpoplpush(final String srckey, final String dstkey) {
return d_rpoplpush(srckey, dstkey).getResult();
}
/** RPOPLPUSH executed through the pool with failover; the operation is routed on {@code srckey}. */
public OperationResult<String> d_rpoplpush(final String srckey, final String dstkey) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(srckey, OpName.RPOPLPUSH) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.rpoplpush(srckey, dstkey);
}
});
}
/** Redis RPUSH on {@code key}; delegates to {@link #d_rpush} and unwraps the result. */
public Long rpush(final String key, final String... values) {
return d_rpush(key, values).getResult();
}
/** RPUSH executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_rpush(final String key, final String... values) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.RPUSH) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.rpush(key, values);
}
});
}
/** Redis RPUSHX on {@code key}; delegates to {@link #d_rpushx} and unwraps the result. */
@Override
public Long rpushx(final String key, final String... values) {
return d_rpushx(key, values).getResult();
}
/** RPUSHX executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_rpushx(final String key, final String... values) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.RPUSHX) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.rpushx(key, values);
}
});
}
/** Redis SADD on {@code key}; delegates to {@link #d_sadd} and unwraps the result. */
@Override
public Long sadd(final String key, final String... members) {
return d_sadd(key, members).getResult();
}
/** SADD executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_sadd(final String key, final String... members) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SADD) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.sadd(key, members);
}
});
}
/** Redis SCARD on {@code key}; delegates to {@link #d_scard} and unwraps the result. */
@Override
public Long scard(final String key) {
return d_scard(key).getResult();
}
/** SCARD executed through the pool with failover; returns the full {@link OperationResult}. */
public OperationResult<Long> d_scard(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SCARD) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.scard(key);
}
});
}
/** Redis SDIFF; delegates to {@link #d_sdiff} (routed on the first key) and unwraps. */
public Set<String> sdiff(final String... keys) {
return d_sdiff(keys).getResult();
}
/** SDIFF executed through the pool with failover; the operation is routed on {@code keys[0]}. */
public OperationResult<Set<String>> d_sdiff(final String... keys) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(keys[0], OpName.SDIFF) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.sdiff(keys);
}
});
}
/** Redis SDIFFSTORE; delegates to {@link #d_sdiffstore} (routed on {@code dstkey}) and unwraps. */
public Long sdiffstore(final String dstkey, final String... keys) {
return d_sdiffstore(dstkey, keys).getResult();
}
/** SDIFFSTORE executed through the pool with failover; the operation is routed on {@code dstkey}. */
public OperationResult<Long> d_sdiffstore(final String dstkey, final String... keys) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(dstkey, OpName.SDIFFSTORE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.sdiffstore(dstkey, keys);
}
});
}
/** Redis SET on {@code key}; delegates to {@link #d_set} and unwraps the result. */
@Override
public String set(final String key, final String value) {
return d_set(key, value).getResult();
}
/**
 * SET executed through the pool with failover. Under a compression strategy the
 * value is compressed before it is written.
 */
public OperationResult<String> d_set(final String key, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SET) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.set(key, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.SET) {
@Override
public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
return client.set(key, compressValue(value, state));
}
});
}
}
/**
 * Legacy SET with NX/XX and EX/PX expressed as strings.
 *
 * @deprecated use {@link #set(String, String, SetParams)} instead.
 */
@Deprecated
public String set(final String key, final String value, final String nxxx, final String expx, final long time) {
    return d_set(key, value, nxxx, expx, time).getResult();
}
/**
 * Translates the legacy NX/XX + EX/PX string flags into {@link SetParams} and delegates
 * to {@link #d_set(String, String, SetParams)}. Unrecognized flag strings are ignored.
 */
public OperationResult<String> d_set(final String key, final String value, final String nxxx, final String expx,
final long time) {
SetParams setParams = SetParams.setParams();
if (nxxx.equalsIgnoreCase("NX")) {
setParams.nx();
} else if (nxxx.equalsIgnoreCase("XX")) {
setParams.xx();
}
if (expx.equalsIgnoreCase("EX")) {
// NOTE(review): narrowing cast — an EX timeout beyond Integer.MAX_VALUE seconds truncates; confirm acceptable.
setParams.ex((int) time);
} else if (expx.equalsIgnoreCase("PX")) {
setParams.px(time);
}
return d_set(key, value, setParams);
}
/** SET with {@link SetParams}; delegates to {@link #d_set(String, String, SetParams)} and unwraps. */
public String set(final String key, final String value, final SetParams setParams) {
return d_set(key, value, setParams).getResult();
}
/**
 * SET with {@link SetParams} (NX/XX, EX/PX) executed through the pool with failover.
 * Under a compression strategy the value is compressed before it is written.
 * (Also adds the braces the first {@code if} arm was missing, matching house style.)
 */
public OperationResult<String> d_set(final String key, final String value, final SetParams setParams) {
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SET) {
            @Override
            public String execute(Jedis client, ConnectionContext state) throws DynoException {
                return client.set(key, value, setParams);
            }
        });
    } else {
        return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.SET) {
            @Override
            public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
                return client.set(key, compressValue(value, state), setParams);
            }
        });
    }
}
/** SETBIT (boolean value) — delegates to the d_-variant and unwraps the result. */
@Override
public Boolean setbit(final String key, final long offset, final boolean value) {
return d_setbit(key, offset, value).getResult();
}
/** SETBIT executed via the connection pool with failover; returns the bit's previous value. */
public OperationResult<Boolean> d_setbit(final String key, final Long offset, final Boolean value) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.SETBIT) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) {
return client.setbit(key, offset, value);
}
});
}
/** SETBIT (string "0"/"1" value) — delegates to the d_-variant and unwraps the result. */
@Override
public Boolean setbit(final String key, final long offset, final String value) {
return d_setbit(key, offset, value).getResult();
}
/** SETBIT (string value) executed via the connection pool with failover. */
public OperationResult<Boolean> d_setbit(final String key, final Long offset, final String value) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.SETBIT) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) {
return client.setbit(key, offset, value);
}
});
}
/** SETEX — delegates to the d_-variant and unwraps the result. */
@Override
public String setex(final String key, final int seconds, final String value) {
return d_setex(key, seconds, value).getResult();
}
/** SETEX via the pool with failover; compresses the value first when a compression strategy is enabled. */
public OperationResult<String> d_setex(final String key, final Integer seconds, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SETEX) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.setex(key, seconds, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.SETEX) {
@Override
public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
return client.setex(key, seconds, compressValue(value, state));
}
});
}
}
/** PSETEX — delegates to the d_-variant and unwraps the result. */
@Override
public String psetex(final String key, final long milliseconds, final String value) {
return d_psetex(key, milliseconds, value).getResult();
}
/** PSETEX via the pool with failover; compresses the value first when a compression strategy is enabled. */
public OperationResult<String> d_psetex(final String key, final long milliseconds, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.PSETEX) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.psetex(key, milliseconds, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<String>(key, OpName.PSETEX) {
@Override
public String execute(final Jedis client, final ConnectionContext state) throws DynoException {
return client.psetex(key, milliseconds, compressValue(value, state));
}
});
}
}
/** SETNX — delegates to the d_-variant and unwraps the result. */
@Override
public Long setnx(final String key, final String value) {
return d_setnx(key, value).getResult();
}
/** SETNX via the pool with failover; compresses the value first when a compression strategy is enabled. */
public OperationResult<Long> d_setnx(final String key, final String value) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SETNX) {
@Override
public Long execute(Jedis client, ConnectionContext state) throws DynoException {
return client.setnx(key, value);
}
});
} else {
return connPool.executeWithFailover(new CompressionValueOperation<Long>(key, OpName.SETNX) {
@Override
public Long execute(final Jedis client, final ConnectionContext state) {
return client.setnx(key, compressValue(value, state));
}
});
}
}
/** SETRANGE — delegates to the d_-variant and unwraps the result. */
@Override
public Long setrange(final String key, final long offset, final String value) {
return d_setrange(key, offset, value).getResult();
}
/** SETRANGE executed via the connection pool with failover; returns the new string length. */
public OperationResult<Long> d_setrange(final String key, final Long offset, final String value) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SETRANGE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.setrange(key, offset, value);
}
});
}
/** SISMEMBER — delegates to the d_-variant and unwraps the result. */
@Override
public Boolean sismember(final String key, final String member) {
return d_sismember(key, member).getResult();
}
/** SISMEMBER executed via the connection pool with failover. */
public OperationResult<Boolean> d_sismember(final String key, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.SISMEMBER) {
@Override
public Boolean execute(Jedis client, ConnectionContext state) {
return client.sismember(key, member);
}
});
}
/** SMEMBERS — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> smembers(final String key) {
return d_smembers(key).getResult();
}
/** SMEMBERS executed via the connection pool with failover. */
public OperationResult<Set<String>> d_smembers(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.SMEMBERS) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.smembers(key);
}
});
}
/** SMOVE — delegates to the d_-variant and unwraps the result. */
public Long smove(final String srckey, final String dstkey, final String member) {
return d_smove(srckey, dstkey, member).getResult();
}
/**
 * SMOVE executed via the connection pool with failover, routed by {@code srckey}.
 * NOTE(review): src and dst may hash to different nodes; presumably both keys are
 * expected to co-locate — confirm against Dyno's multi-key routing behavior.
 */
public OperationResult<Long> d_smove(final String srckey, final String dstkey, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(srckey, OpName.SMOVE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.smove(srckey, dstkey, member);
}
});
}
/** SORT — delegates to the d_-variant and unwraps the result. */
@Override
public List<String> sort(String key) {
return d_sort(key).getResult();
}
/** SORT executed via the connection pool with failover. */
public OperationResult<List<String>> d_sort(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.SORT) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.sort(key);
}
});
}
/** SORT with {@link SortingParams} — delegates to the d_-variant and unwraps the result. */
@Override
public List<String> sort(String key, SortingParams sortingParameters) {
return d_sort(key, sortingParameters).getResult();
}
/** SORT with {@link SortingParams} executed via the connection pool with failover. */
public OperationResult<List<String>> d_sort(final String key, final SortingParams sortingParameters) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.SORT) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.sort(key, sortingParameters);
}
});
}
/** SPOP — delegates to the d_-variant and unwraps the result. */
@Override
public String spop(final String key) {
return d_spop(key).getResult();
}
/** SPOP executed via the connection pool with failover. */
public OperationResult<String> d_spop(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SPOP) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.spop(key);
}
});
}
/** SPOP with count — not supported by Dyno yet; always throws. */
@Override
public Set<String> spop(String key, long count) {
throw new UnsupportedOperationException("not yet implemented");
}
/** SRANDMEMBER — delegates to the d_-variant and unwraps the result. */
@Override
public String srandmember(final String key) {
return d_srandmember(key).getResult();
}
/** SRANDMEMBER with count — not supported by Dyno yet; always throws. */
@Override
public List<String> srandmember(String key, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
/** SRANDMEMBER executed via the connection pool with failover. */
public OperationResult<String> d_srandmember(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SRANDMEMBER) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.srandmember(key);
}
});
}
/** SREM — delegates to the d_-variant and unwraps the result. */
@Override
public Long srem(final String key, final String... members) {
return d_srem(key, members).getResult();
}
/** SREM executed via the connection pool with failover. */
public OperationResult<Long> d_srem(final String key, final String... members) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SREM) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.srem(key, members);
}
});
}
/** SSCAN — delegates to the d_-variant and unwraps the result. */
@Override
public ScanResult<String> sscan(final String key, final String cursor) {
return d_sscan(key, cursor).getResult();
}
/** SSCAN executed via the connection pool with failover. */
public OperationResult<ScanResult<String>> d_sscan(final String key, final String cursor) {
return connPool.executeWithFailover(new BaseKeyOperation<ScanResult<String>>(key, OpName.SSCAN) {
@Override
public ScanResult<String> execute(Jedis client, ConnectionContext state) {
return client.sscan(key, cursor);
}
});
}
/** SSCAN with {@link ScanParams} — delegates to the d_-variant and unwraps the result. */
@Override
public ScanResult<String> sscan(final String key, final String cursor, final ScanParams params) {
return d_sscan(key, cursor, params).getResult();
}
/** SSCAN with {@link ScanParams} executed via the connection pool with failover. */
public OperationResult<ScanResult<String>> d_sscan(final String key, final String cursor, final ScanParams params) {
return connPool.executeWithFailover(new BaseKeyOperation<ScanResult<String>>(key, OpName.SSCAN) {
@Override
public ScanResult<String> execute(Jedis client, ConnectionContext state) {
return client.sscan(key, cursor, params);
}
});
}
/** STRLEN — delegates to the d_-variant and unwraps the result. */
@Override
public Long strlen(final String key) {
return d_strlen(key).getResult();
}
/** STRLEN executed via the connection pool with failover. */
public OperationResult<Long> d_strlen(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.STRLEN) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.strlen(key);
}
});
}
/** SUBSTR — delegates to the d_-variant and unwraps the result. */
@Override
public String substr(String key, int start, int end) {
return d_substr(key, start, end).getResult();
}
/** SUBSTR executed via the connection pool with failover (inclusive start/end indices). */
public OperationResult<String> d_substr(final String key, final Integer start, final Integer end) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SUBSTR) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.substr(key, start, end);
}
});
}
/** TTL — delegates to the d_-variant and unwraps the result. */
@Override
public Long ttl(final String key) {
return d_ttl(key).getResult();
}
/** TTL executed via the connection pool with failover. */
public OperationResult<Long> d_ttl(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.TTL) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.ttl(key);
}
});
}
/** TYPE — delegates to the d_-variant and unwraps the result. */
@Override
public String type(final String key) {
return d_type(key).getResult();
}
/** TYPE executed via the connection pool with failover. */
public OperationResult<String> d_type(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.TYPE) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.type(key);
}
});
}
/** ZADD — delegates to the d_-variant and unwraps the result. */
@Override
public Long zadd(String key, double score, String member) {
return d_zadd(key, score, member).getResult();
}
/** ZADD executed via the connection pool with failover. */
public OperationResult<Long> d_zadd(final String key, final Double score, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZADD) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zadd(key, score, member);
}
});
}
/** ZADD (bulk member→score map) — delegates to the d_-variant and unwraps the result. */
@Override
public Long zadd(String key, Map<String, Double> scoreMembers) {
return d_zadd(key, scoreMembers).getResult();
}
/** ZADD (bulk map) executed via the connection pool with failover. */
public OperationResult<Long> d_zadd(final String key, final Map<String, Double> scoreMembers) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZADD) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zadd(key, scoreMembers);
}
});
}
/** ZADD with {@link ZAddParams} — delegates to the d_-variant and unwraps the result. */
@Override
public Long zadd(String key, double score, String member, ZAddParams params) {
return d_zadd(key, score, member, params).getResult();
}
/** ZADD with {@link ZAddParams} executed via the connection pool with failover. */
public OperationResult<Long> d_zadd(final String key, final double score, final String member,
final ZAddParams params) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZADD) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zadd(key, score, member, params);
}
});
}
/** ZCARD — delegates to the d_-variant and unwraps the result. */
@Override
public Long zcard(final String key) {
return d_zcard(key).getResult();
}
/** ZCARD executed via the connection pool with failover. */
public OperationResult<Long> d_zcard(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZCARD) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zcard(key);
}
});
}
/** ZCOUNT (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Long zcount(final String key, final double min, final double max) {
return d_zcount(key, min, max).getResult();
}
/** ZCOUNT (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Long> d_zcount(final String key, final Double min, final Double max) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZCOUNT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zcount(key, min, max);
}
});
}
/** ZCOUNT (string bounds, e.g. "(1", "+inf") — delegates to the d_-variant and unwraps the result. */
@Override
public Long zcount(String key, String min, String max) {
return d_zcount(key, min, max).getResult();
}
/** ZCOUNT (string bounds) executed via the connection pool with failover. */
public OperationResult<Long> d_zcount(final String key, final String min, final String max) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZCOUNT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zcount(key, min, max);
}
});
}
/** ZINCRBY — delegates to the d_-variant and unwraps the result. */
@Override
public Double zincrby(final String key, final double score, final String member) {
return d_zincrby(key, score, member).getResult();
}
/** ZINCRBY executed via the connection pool with failover; returns the new score. */
public OperationResult<Double> d_zincrby(final String key, final Double score, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Double>(key, OpName.ZINCRBY) {
@Override
public Double execute(Jedis client, ConnectionContext state) {
return client.zincrby(key, score, member);
}
});
}
/** ZRANGE — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrange(String key, long start, long end) {
return d_zrange(key, start, end).getResult();
}
/** ZRANGE executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrange(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrange(key, start, end);
}
});
}
/** ZRANK — delegates to the d_-variant and unwraps the result. */
@Override
public Long zrank(final String key, final String member) {
return d_zrank(key, member).getResult();
}
/** ZRANK executed via the connection pool with failover. */
public OperationResult<Long> d_zrank(final String key, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZRANK) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zrank(key, member);
}
});
}
/** ZREM — delegates to the d_-variant and unwraps the result. */
@Override
public Long zrem(String key, String... member) {
return d_zrem(key, member).getResult();
}
/** ZREM executed via the connection pool with failover. */
public OperationResult<Long> d_zrem(final String key, final String... member) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZREM) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zrem(key, member);
}
});
}
/** ZREMRANGEBYRANK — delegates to the d_-variant and unwraps the result. */
@Override
public Long zremrangeByRank(final String key, final long start, final long end) {
return d_zremrangeByRank(key, start, end).getResult();
}
/** ZREMRANGEBYRANK executed via the connection pool with failover. */
public OperationResult<Long> d_zremrangeByRank(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZREMRANGEBYRANK) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zremrangeByRank(key, start, end);
}
});
}
/** ZREMRANGEBYSCORE (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Long zremrangeByScore(final String key, final double start, final double end) {
return d_zremrangeByScore(key, start, end).getResult();
}
/** ZREMRANGEBYSCORE (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Long> d_zremrangeByScore(final String key, final Double start, final Double end) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZREMRANGEBYSCORE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zremrangeByScore(key, start, end);
}
});
}
/** ZREVRANGE — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrevrange(String key, long start, long end) {
return d_zrevrange(key, start, end).getResult();
}
/** ZREVRANGE executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrevrange(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrevrange(key, start, end);
}
});
}
/** ZREVRANK — delegates to the d_-variant and unwraps the result. */
@Override
public Long zrevrank(final String key, final String member) {
return d_zrevrank(key, member).getResult();
}
/** ZREVRANK executed via the connection pool with failover. */
public OperationResult<Long> d_zrevrank(final String key, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZREVRANK) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zrevrank(key, member);
}
});
}
/** ZRANGE WITHSCORES — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrangeWithScores(String key, long start, long end) {
return d_zrangeWithScores(key, start, end).getResult();
}
/** ZRANGE WITHSCORES executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrangeWithScores(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZRANGEWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrangeWithScores(key, start, end);
}
});
}
/** ZREVRANGE WITHSCORES — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrevrangeWithScores(String key, long start, long end) {
return d_zrevrangeWithScores(key, start, end).getResult();
}
/** ZREVRANGE WITHSCORES executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrevrangeWithScores(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZREVRANGEWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeWithScores(key, start, end);
}
});
}
/** ZSCORE — delegates to the d_-variant and unwraps the result. */
@Override
public Double zscore(final String key, final String member) {
return d_zscore(key, member).getResult();
}
/** ZSCORE executed via the connection pool with failover. */
public OperationResult<Double> d_zscore(final String key, final String member) {
return connPool.executeWithFailover(new BaseKeyOperation<Double>(key, OpName.ZSCORE) {
@Override
public Double execute(Jedis client, ConnectionContext state) {
return client.zscore(key, member);
}
});
}
/** ZSCAN — delegates to the d_-variant and unwraps the result. */
@Override
public ScanResult<Tuple> zscan(final String key, final String cursor) {
return d_zscan(key, cursor).getResult();
}
/** ZSCAN executed via the connection pool with failover. */
public OperationResult<ScanResult<Tuple>> d_zscan(final String key, final String cursor) {
return connPool.executeWithFailover(new BaseKeyOperation<ScanResult<Tuple>>(key, OpName.ZSCAN) {
@Override
public ScanResult<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zscan(key, cursor);
}
});
}
/** ZRANGEBYSCORE (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrangeByScore(String key, double min, double max) {
return d_zrangeByScore(key, min, max).getResult();
}
/** ZRANGEBYSCORE (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrangeByScore(final String key, final Double min, final Double max) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScore(key, min, max);
}
});
}
/** ZRANGEBYSCORE (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrangeByScore(String key, String min, String max) {
return d_zrangeByScore(key, min, max).getResult();
}
/** ZRANGEBYSCORE (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrangeByScore(final String key, final String min, final String max) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScore(key, min, max);
}
});
}
/** ZRANGEBYSCORE with LIMIT — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
return d_zrangeByScore(key, min, max, offset, count).getResult();
}
/** ZRANGEBYSCORE with LIMIT (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrangeByScore(final String key, final Double min, final Double max,
final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScore(key, min, max, offset, count);
}
});
}
/** ZREVRANGEBYSCORE (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrevrangeByScore(String key, String max, String min) {
return d_zrevrangeByScore(key, max, min).getResult();
}
/** ZREVRANGEBYSCORE (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrevrangeByScore(final String key, final String max, final String min) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScore(key, max, min);
}
});
}
/** ZRANGEBYSCORE with LIMIT (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
return d_zrangeByScore(key, min, max, offset, count).getResult();
}
/** ZRANGEBYSCORE with LIMIT (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrangeByScore(final String key, final String min, final String max,
final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScore(key, min, max, offset, count);
}
});
}
/** ZREVRANGEBYSCORE with LIMIT (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
return d_zrevrangeByScore(key, max, min, offset, count).getResult();
}
/** ZREVRANGEBYSCORE with LIMIT (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrevrangeByScore(final String key, final Double max, final Double min,
final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScore(key, max, min, offset, count);
}
});
}
/** ZREVRANGEBYSCORE (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrevrangeByScore(String key, double max, double min) {
return d_zrevrangeByScore(key, max, min).getResult();
}
/** ZREVRANGEBYSCORE (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrevrangeByScore(final String key, final Double max, final Double min) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScore(key, max, min);
}
});
}
/** ZRANGEBYSCORE WITHSCORES (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
return d_zrangeByScoreWithScores(key, min, max).getResult();
}
/**
 * ZRANGEBYSCORE WITHSCORES (numeric bounds) executed via the connection pool with failover.
 *
 * @param key the sorted-set key
 * @param min inclusive lower score bound
 * @param max inclusive upper score bound
 * @return the raw {@link OperationResult} wrapping member/score tuples in ascending score order
 */
public OperationResult<Set<Tuple>> d_zrangeByScoreWithScores(final String key, final Double min, final Double max) {
    // Fixed op name: was OpName.ZREVRANGEBYSCORE, which mislabeled this forward-range
    // operation in metrics; siblings use ZRANGEBYSCOREWITHSCORES for the same command.
    return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZRANGEBYSCOREWITHSCORES) {
        @Override
        public Set<Tuple> execute(Jedis client, ConnectionContext state) {
            return client.zrangeByScoreWithScores(key, min, max);
        }
    });
}
/** ZREVRANGEBYSCORE WITHSCORES (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
    // Bug fix: arguments were previously forwarded swapped as (key, min, max) into the
    // (key, max, min) overload, inverting the score bounds sent to Redis.
    return d_zrevrangeByScoreWithScores(key, max, min).getResult();
}
/** ZREVRANGEBYSCORE WITHSCORES (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrevrangeByScoreWithScores(final String key, final Double max,
final Double min) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZREVRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScoreWithScores(key, max, min);
}
});
}
/** ZRANGEBYSCORE WITHSCORES with LIMIT — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max, int offset, int count) {
return d_zrangeByScoreWithScores(key, min, max, offset, count).getResult();
}
/** ZRANGEBYSCORE WITHSCORES with LIMIT (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrangeByScoreWithScores(final String key, final Double min, final Double max,
final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScoreWithScores(key, min, max, offset, count);
}
});
}
/** ZREVRANGEBYSCORE with LIMIT (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
return d_zrevrangeByScore(key, max, min, offset, count).getResult();
}
/** ZREVRANGEBYSCORE with LIMIT (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrevrangeByScore(final String key, final String max, final String min,
final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGEBYSCORE) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScore(key, max, min, offset, count);
}
});
}
/** ZRANGEBYSCORE WITHSCORES (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
return d_zrangeByScoreWithScores(key, min, max).getResult();
}
/** ZRANGEBYSCORE WITHSCORES (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrangeByScoreWithScores(final String key, final String min, final String max) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScoreWithScores(key, min, max);
}
});
}
/** ZREVRANGEBYSCORE WITHSCORES (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
return d_zrevrangeByScoreWithScores(key, max, min).getResult();
}
/** ZREVRANGEBYSCORE WITHSCORES (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrevrangeByScoreWithScores(final String key, final String max,
final String min) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZREVRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScoreWithScores(key, max, min);
}
});
}
/** ZRANGEBYSCORE WITHSCORES with LIMIT (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max, int offset, int count) {
return d_zrangeByScoreWithScores(key, min, max, offset, count).getResult();
}
/** ZRANGEBYSCORE WITHSCORES with LIMIT (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrangeByScoreWithScores(final String key, final String min, final String max,
final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrangeByScoreWithScores(key, min, max, offset, count);
}
});
}
/** ZREVRANGEBYSCORE WITHSCORES with LIMIT (numeric bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min, int offset, int count) {
return d_zrevrangeByScoreWithScores(key, max, min, offset, count).getResult();
}
/** ZREVRANGEBYSCORE WITHSCORES with LIMIT (numeric bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrevrangeByScoreWithScores(final String key, final Double max,
final Double min, final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZREVRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
});
}
/** ZREVRANGEBYSCORE WITHSCORES with LIMIT (string bounds) — delegates to the d_-variant and unwraps the result. */
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min, int offset, int count) {
return d_zrevrangeByScoreWithScores(key, max, min, offset, count).getResult();
}
/** ZREVRANGEBYSCORE WITHSCORES with LIMIT (string bounds) executed via the connection pool with failover. */
public OperationResult<Set<Tuple>> d_zrevrangeByScoreWithScores(final String key, final String max,
final String min, final Integer offset, final Integer count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<Tuple>>(key, OpName.ZREVRANGEBYSCOREWITHSCORES) {
@Override
public Set<Tuple> execute(Jedis client, ConnectionContext state) {
return client.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
});
}
/** ZREMRANGEBYSCORE (string bounds) — delegates to the d_-variant declared below and unwraps the result. */
@Override
public Long zremrangeByScore(String key, String start, String end) {
return d_zremrangeByScore(key, start, end).getResult();
}
/** ZLEXCOUNT — delegates to the d_-variant and unwraps the result. */
@Override
public Long zlexcount(String key, String min, String max) {
return d_zlexcount(key, min, max).getResult();
}
/** ZLEXCOUNT executed via the connection pool with failover. */
public OperationResult<Long> d_zlexcount(final String key, final String min, final String max) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZLEXCOUNT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zlexcount(key, min, max);
}
});
}
/** ZRANGEBYLEX — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrangeByLex(String key, String min, String max) {
return d_zrangeByLex(key, min, max).getResult();
}
/** ZRANGEBYLEX executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrangeByLex(final String key, final String min, final String max) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGEBYLEX) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByLex(key, min, max);
}
});
}
/** ZRANGEBYLEX with LIMIT — delegates to the d_-variant and unwraps the result. */
@Override
public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
return d_zrangeByLex(key, min, max, offset, count).getResult();
}
/** ZRANGEBYLEX with LIMIT executed via the connection pool with failover. */
public OperationResult<Set<String>> d_zrangeByLex(final String key, final String min, final String max,
final int offset, final int count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZRANGEBYLEX) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByLex(key, min, max, offset, count);
}
});
}
/** ZREMRANGEBYLEX — delegates to the d_-variant and unwraps the result. */
@Override
public Long zremrangeByLex(String key, String min, String max) {
return d_zremrangeByLex(key, min, max).getResult();
}
/** ZREMRANGEBYLEX executed via the connection pool with failover. */
public OperationResult<Long> d_zremrangeByLex(final String key, final String min, final String max) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZREMRANGEBYLEX) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zremrangeByLex(key, min, max);
}
});
}
/** ZREMRANGEBYSCORE (string bounds) executed via the connection pool with failover. */
public OperationResult<Long> d_zremrangeByScore(final String key, final String start, final String end) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZREMRANGEBYSCORE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.zremrangeByScore(key, start, end);
}
});
}
/** BLPOP (single key) — delegates to the d_-variant and unwraps the result. */
@Override
public List<String> blpop(int timeout, String key) {
return d_blpop(timeout, key).getResult();
}
/** BLPOP (single key) executed via the connection pool with failover; blocks up to {@code timeout} seconds. */
public OperationResult<List<String>> d_blpop(final int timeout, final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.BLPOP) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.blpop(timeout, key);
}
});
}
/** BRPOP (single key) — delegates to the d_-variant and unwraps the result. */
@Override
public List<String> brpop(int timeout, String key) {
return d_brpop(timeout, key).getResult();
}
/** BRPOP (single key) executed via the connection pool with failover; blocks up to {@code timeout} seconds. */
public OperationResult<List<String>> d_brpop(final int timeout, final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<List<String>>(key, OpName.BRPOP) {
@Override
public List<String> execute(Jedis client, ConnectionContext state) {
return client.brpop(timeout, key);
}
});
}
/** ECHO — delegates to the d_-variant and unwraps the result. */
@Override
public String echo(String string) {
return d_echo(string).getResult();
}
/** ECHO executed via the connection pool with failover; the echoed string doubles as the routing key. */
public OperationResult<String> d_echo(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.ECHO) {
@Override
public String execute(Jedis client, ConnectionContext state) {
return client.echo(key);
}
});
}
/** MOVE — delegates to the d_-variant and unwraps the result. */
@Override
public Long move(String key, int dbIndex) {
return d_move(key, dbIndex).getResult();
}
/** MOVE executed via the connection pool with failover. */
public OperationResult<Long> d_move(final String key, final Integer dbIndex) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.MOVE) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.move(key, dbIndex);
}
});
}
/** BITCOUNT — delegates to the d_-variant and unwraps the result. */
@Override
public Long bitcount(String key) {
return d_bitcount(key).getResult();
}
/** BITCOUNT executed via the connection pool with failover. */
public OperationResult<Long> d_bitcount(final String key) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.BITCOUNT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.bitcount(key);
}
});
}
/** PFADD — not supported by Dyno yet; always throws. */
@Override
public Long pfadd(String key, String... elements) {
throw new UnsupportedOperationException("not yet implemented");
}
/** PFCOUNT — not supported by Dyno yet; always throws. */
@Override
public long pfcount(String key) {
throw new UnsupportedOperationException("not yet implemented");
}
/** BITCOUNT over a byte range — delegates to the d_-variant and unwraps the result. */
@Override
public Long bitcount(String key, long start, long end) {
return d_bitcount(key, start, end).getResult();
}
/** BITCOUNT over a byte range executed via the connection pool with failover. */
public OperationResult<Long> d_bitcount(final String key, final Long start, final Long end) {
return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.BITCOUNT) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.bitcount(key, start, end);
}
});
}
/**
* MULTI-KEY COMMANDS
*/
/** Multi-key BLPOP — not supported by Dyno yet; always throws. */
@Override
public List<String> blpop(int timeout, String... keys) {
throw new UnsupportedOperationException("not yet implemented");
}
/** Multi-key BRPOP — not supported by Dyno yet; always throws. */
@Override
public List<String> brpop(int timeout, String... keys) {
throw new UnsupportedOperationException("not yet implemented");
}
/** Varargs BLPOP — not supported by Dyno yet; always throws. */
@Override
public List<String> blpop(String... args) {
throw new UnsupportedOperationException("not yet implemented");
}
/** Varargs BRPOP — not supported by Dyno yet; always throws. */
@Override
public List<String> brpop(String... args) {
throw new UnsupportedOperationException("not yet implemented");
}
/** KEYS: runs the pattern match on every node via d_keys and unions all per-node results. */
@Override
public Set<String> keys(String pattern) {
Set<String> allResults = new HashSet<String>();
Collection<OperationResult<Set<String>>> results = d_keys(pattern);
for (OperationResult<Set<String>> result : results) {
allResults.addAll(result.getResult());
}
return allResults;
}
/**
* Use this with care, especially in the context of production databases.
*
* @param pattern Specifies the match set for keys
* @return a collection of operation results
* @see <a href="http://redis.io/commands/KEYS">keys</a>
*/
public Collection<OperationResult<Set<String>>> d_keys(final String pattern) {
Logger.warn("Executing d_keys for pattern: " + pattern);
Collection<OperationResult<Set<String>>> results = connPool
.executeWithRing(new CursorBasedResultImpl<String>(new LinkedHashMap<String, ScanResult<String>>()), new BaseKeyOperation<Set<String>>(pattern, OpName.KEYS) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) throws DynoException {
return client.keys(pattern);
}
});
return results;
}
@Override
public Long pexpire(String key, long milliseconds) {
throw new UnsupportedOperationException("not yet implemented");
}
    /**
     * Get values for all the keys provided. Returns a list of string values
     * corresponding to individual keys. If one of the key is missing, the
     * return list has null as its corresponding value.
     *
     * @param keys: variable list of keys to query
     * @return list of string values
     * @see <a href="http://redis.io/commands/MGET">mget</a>
     */
    @Override
    public List<String> mget(String... keys) {
        return d_mget(keys).getResult();
    }
    // When compression is enabled, each returned value is decompressed individually;
    // values that were never compressed pass through decompressValue unchanged.
    public OperationResult<List<String>> d_mget(final String... keys) {
        if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
            return connPool.executeWithFailover(new MultiKeyOperation<List<String>>(Arrays.asList(keys), OpName.MGET) {
                @Override
                public List<String> execute(Jedis client, ConnectionContext state) {
                    return client.mget(keys);
                }
            });
        } else {
            return connPool.executeWithFailover(
                    new CompressionValueMultiKeyOperation<List<String>>(Arrays.asList(keys), OpName.MGET) {
                        @Override
                        public List<String> execute(final Jedis client, final ConnectionContext state)
                                throws DynoException {
                            return new ArrayList<String>(CollectionUtils.transform(client.mget(keys),
                                    new CollectionUtils.Transform<String, String>() {
                                        @Override
                                        public String get(String s) {
                                            return decompressValue(state, s);
                                        }
                                    }));
                        }
                    });
        }
    }
@Override
public Long exists(String... arg0) {
return d_exists(arg0).getResult();
}
public OperationResult<Long> d_exists(final String... arg0) {
return connPool.executeWithFailover(new MultiKeyOperation<Long>(Arrays.asList(arg0), OpName.EXISTS) {
@Override
public Long execute(Jedis client, ConnectionContext state) {
return client.exists(arg0);
}
});
}
    // Deletes all of the given keys; returns the number of keys actually removed.
    @Override
    public Long del(String... keys) {
        return d_del(keys).getResult();
    }
    @Override
    public Long unlink(String... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // Failover-aware variant of del(String...).
    public OperationResult<Long> d_del(final String... keys) {
        return connPool.executeWithFailover(new MultiKeyOperation<Long>(Arrays.asList(keys), OpName.DEL) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.del(keys);
            }
        });
    }
    // MSETNX: sets all key/value pairs only if none of the keys already exist.
    @Override
    public Long msetnx(String... keysvalues) {
        return d_msetnx(keysvalues).getResult();
    }
    public OperationResult<Long> d_msetnx(final String... keysvalues) {
        if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
            return connPool.executeWithFailover(new MultiKeyOperation<Long>(Arrays.asList(keysvalues), OpName.MSETNX) {
                @Override
                public Long execute(Jedis client, ConnectionContext state) {
                    return client.msetnx(keysvalues);
                }
            });
        } else {
            // compressMultiKeyValue presumably compresses only the value positions of the
            // alternating key/value array — NOTE(review): confirm against its implementation.
            return connPool.executeWithFailover(new CompressionValueMultiKeyOperation<Long>(Arrays.asList(keysvalues), OpName.MSETNX) {
                @Override
                public Long execute(final Jedis client, final ConnectionContext state) {
                    return client.msetnx(compressMultiKeyValue(state, keysvalues));
                }
            });
        }
    }
    // MSET: unconditionally sets all key/value pairs.
    @Override
    public String mset(String... keysvalues) {
        return d_mset(keysvalues).getResult();
    }
    public OperationResult<String> d_mset(final String... keysvalues) {
        if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
            return connPool.executeWithFailover(new MultiKeyOperation<String>(Arrays.asList(keysvalues), OpName.MSET) {
                @Override
                public String execute(Jedis client, ConnectionContext state) {
                    return client.mset(keysvalues);
                }
            });
        } else {
            return connPool.executeWithFailover(new CompressionValueMultiKeyOperation<String>(Arrays.asList(keysvalues), OpName.MSET) {
                @Override
                public String execute(final Jedis client, final ConnectionContext state) {
                    return client.mset(compressMultiKeyValue(state, keysvalues));
                }
            });
        }
    }
    // The following multi-key set/sort/watch commands (String and binary forms) are
    // not routed by Dyno yet and fail fast with UnsupportedOperationException.
    @Override
    public Set<String> sinter(String... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    public Long sinterstore(final String dstkey, final String... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sort(String key, SortingParams sortingParameters, String dstkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sort(String key, String dstkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<String> sunion(String... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sunionstore(String dstkey, String... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String watch(String... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long del(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long unlink(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long exists(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> blpop(int timeout, byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> brpop(int timeout, byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> blpop(byte[]... args) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> brpop(byte[]... args) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> keys(byte[] pattern) {
        throw new UnsupportedOperationException("not yet implemented");
    }
@Override
public List<byte[]> mget(byte[]... keys) {
return d_mget(keys).getResult();
}
public OperationResult<List<byte[]>> d_mget(final byte[]... keys) {
if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
return connPool.executeWithFailover(new MultiKeyOperation<List<byte[]>>(Arrays.asList(keys), OpName.MGET) {
@Override
public List<byte[]> execute(Jedis client, ConnectionContext state) {
return client.mget(keys);
}
});
} else {
return connPool.executeWithFailover(
new CompressionValueMultiKeyOperation<List<byte[]>>(Arrays.asList(keys), OpName.MGET) {
@Override
public List<byte[]> execute(final Jedis client, final ConnectionContext state)
throws DynoException {
return new ArrayList<>(CollectionUtils.transform(client.mget(keys),
new CollectionUtils.Transform<byte[], byte[]>() {
@Override
public byte[] get(byte[] s) {
return decompressValue(state, String.valueOf(s)).getBytes();
}
}));
}
});
}
}
    // The following binary multi-key, set-algebra, pub/sub and zset-store commands
    // are not routed by Dyno yet and fail fast with UnsupportedOperationException.
    @Override
    public String mset(byte[]... keysvalues) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long msetnx(byte[]... keysvalues) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String rename(byte[] oldkey, byte[] newkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long renamenx(byte[] oldkey, byte[] newkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] rpoplpush(byte[] srckey, byte[] dstkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> sdiff(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sdiffstore(byte[] dstkey, byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> sinter(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sinterstore(byte[] dstkey, byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long smove(byte[] srckey, byte[] dstkey, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sort(byte[] key, SortingParams sortingParameters, byte[] dstkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sort(byte[] key, byte[] dstkey) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> sunion(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long sunionstore(byte[] dstkey, byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String watch(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String unwatch() {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zinterstore(byte[] dstkey, byte[]... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zinterstore(byte[] dstkey, ZParams params, byte[]... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zunionstore(byte[] dstkey, byte[]... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zunionstore(byte[] dstkey, ZParams params, byte[]... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] brpoplpush(byte[] source, byte[] destination, int timeout) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long publish(byte[] channel, byte[] message) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public void subscribe(BinaryJedisPubSub jedisPubSub, byte[]... channels) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public void psubscribe(BinaryJedisPubSub jedisPubSub, byte[]... patterns) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] randomBinaryKey() {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long bitop(BitOP op, byte[] destKey, byte[]... srcKeys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String pfmerge(byte[] destkey, byte[]... sourcekeys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long pfcount(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long touch(byte[]... keys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zinterstore(String dstkey, String... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zinterstore(String dstkey, ZParams params, String... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
@Override
public Set<String> zrevrangeByLex(String key, String max, String min) {
return d_zrevrangeByLex(key, max, min).getResult();
}
public OperationResult<Set<String>> d_zrevrangeByLex(final String key, final String max, final String min) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGEBYLEX) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByLex(key, max, min);
}
});
}
@Override
public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
return d_zrevrangeByLex(key, max, min, offset, count).getResult();
}
public OperationResult<Set<String>> d_zrevrangeByLex(final String key, final String max, final String min,
final int offset, final int count) {
return connPool.executeWithFailover(new BaseKeyOperation<Set<String>>(key, OpName.ZREVRANGEBYLEX) {
@Override
public Set<String> execute(Jedis client, ConnectionContext state) {
return client.zrangeByLex(key, max, min, offset, count);
}
});
}
    // The following String-keyed store/blocking/pub-sub/bit commands are not routed
    // by Dyno yet and fail fast with UnsupportedOperationException.
    @Override
    public Long zunionstore(String dstkey, String... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zunionstore(String dstkey, ZParams params, String... sets) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String brpoplpush(String source, String destination, int timeout) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long publish(String channel, String message) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public void subscribe(JedisPubSub jedisPubSub, String... channels) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public void psubscribe(JedisPubSub jedisPubSub, String... patterns) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String randomKey() {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long bitop(BitOP op, String destKey, String... srcKeys) {
        throw new UnsupportedOperationException("not yet implemented");
    }
/******************* Jedis Binary Commands **************/
@Override
public String set(final byte[] key, final byte[] value) {
return d_set(key, value).getResult();
}
public OperationResult<String> d_set(final byte[] key, final byte[] value) {
return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SET) {
@Override
public String execute(Jedis client, ConnectionContext state) throws DynoException {
return client.set(key, value);
}
});
}
@Override
public byte[] get(final byte[] key) {
return d_get(key).getResult();
}
public OperationResult<byte[]> d_get(final byte[] key) {
return connPool.executeWithFailover(new BaseKeyOperation<byte[]>(key, OpName.GET) {
@Override
public byte[] execute(Jedis client, ConnectionContext state) throws DynoException {
return client.get(key);
}
});
}
    // SETEX: set value with a TTL in seconds.
    @Override
    public String setex(final byte[] key, final int seconds, final byte[] value) {
        return d_setex(key, seconds, value).getResult();
    }
    public OperationResult<String> d_setex(final byte[] key, final Integer seconds, final byte[] value) {
        return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SETEX) {
            @Override
            public String execute(Jedis client, ConnectionContext state) throws DynoException {
                return client.setex(key, seconds, value);
            }
        });
    }
    // PSETEX: set value with a TTL in milliseconds.
    @Override
    public String psetex(byte[] key, long milliseconds, byte[] value) {
        return d_psetex(key, milliseconds, value).getResult();
    }
    public OperationResult<String> d_psetex(final byte[] key, final Long milliseconds, final byte[] value) {
        return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.PSETEX) {
            @Override
            public String execute(Jedis client, ConnectionContext state) throws DynoException {
                return client.psetex(key, milliseconds, value);
            }
        });
    }
    // SET with extra options (NX/XX, EX/PX) carried by SetParams.
    @Override
    public String set(final byte[] key, final byte[] value, final SetParams setParams) {
        return d_set(key, value, setParams).getResult();
    }
    public OperationResult<String> d_set(final byte[] key, final byte[] value, final SetParams setParams) {
        return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.SET) {
            @Override
            public String execute(Jedis client, ConnectionContext state) throws DynoException {
                return client.set(key, value, setParams);
            }
        });
    }
    // EXISTS for a single binary key.
    @Override
    public Boolean exists(final byte[] key) {
        return d_exists(key).getResult();
    }
    public OperationResult<Boolean> d_exists(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Boolean>(key, OpName.EXISTS) {
            @Override
            public Boolean execute(Jedis client, ConnectionContext state) {
                return client.exists(key);
            }
        });
    }
    // persist/type/dump/restore are not routed by Dyno yet.
    @Override
    public Long persist(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String type(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] dump(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String restore(byte[] key, int ttl, byte[] serializedValue) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String restoreReplace(byte[] key, int ttl, byte[] serializedValue) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // EXPIRE: set a TTL (seconds) on the key; returns 1 if the TTL was set.
    @Override
    public Long expire(final byte[] key, final int seconds) {
        return d_expire(key, seconds).getResult();
    }
    public OperationResult<Long> d_expire(final byte[] key, final int seconds) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.EXPIRE) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.expire(key, seconds);
            }
        });
    }
    @Override
    public Long pexpire(byte[] key, final long milliseconds) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // EXPIREAT: expire at an absolute unix timestamp (seconds).
    @Override
    public Long expireAt(final byte[] key, final long unixTime) {
        return d_expireAt(key, unixTime).getResult();
    }
    public OperationResult<Long> d_expireAt(final byte[] key, final long unixTime) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.EXPIREAT) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.expireAt(key, unixTime);
            }
        });
    }
    @Override
    public Long pexpireAt(byte[] key, long millisecondsTimestamp) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // TTL: remaining time-to-live in seconds (-1 no TTL, -2 no such key).
    @Override
    public Long ttl(final byte[] key) {
        return d_ttl(key).getResult();
    }
    public OperationResult<Long> d_ttl(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.TTL) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.ttl(key);
            }
        });
    }
    // PTTL: remaining time-to-live in milliseconds.
    @Override
    public Long pttl(byte[] key) {
        return d_pttl(key).getResult();
    }
    public OperationResult<Long> d_pttl(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.PTTL) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.pttl(key);
            }
        });
    }
    // The following binary string/bit/counter commands are not routed by Dyno yet.
    @Override
    public Long touch(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Boolean setbit(byte[] key, long offset, boolean value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Boolean setbit(byte[] key, long offset, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Boolean getbit(byte[] key, long offset) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long setrange(byte[] key, long offset, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] getrange(byte[] key, long startOffset, long endOffset) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] getSet(byte[] key, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long setnx(byte[] key, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long decrBy(byte[] key, long integer) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long decr(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long incrBy(byte[] key, long integer) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Double incrByFloat(byte[] key, double value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long incr(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long append(byte[] key, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] substr(byte[] key, int start, int end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // HSET: sets a single field in the hash at key; returns 1 if the field was new.
    @Override
    public Long hset(final byte[] key, final byte[] field, final byte[] value) {
        return d_hset(key, field, value).getResult();
    }
    @Override
    public Long hset(byte[] key, Map<byte[], byte[]> hash) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    public OperationResult<Long> d_hset(final byte[] key, final byte[] field, final byte[] value) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HSET) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.hset(key, field, value);
            }
        });
    }
    // HGET: returns the field's value, or null when key/field is absent.
    @Override
    public byte[] hget(final byte[] key, final byte[] field) {
        return d_hget(key, field).getResult();
    }
    public OperationResult<byte[]> d_hget(final byte[] key, final byte[] field) {
        return connPool.executeWithFailover(new BaseKeyOperation<byte[]>(key, OpName.HGET) {
            @Override
            public byte[] execute(Jedis client, ConnectionContext state) throws DynoException {
                return client.hget(key, field);
            }
        });
    }
    @Override
    public Long hsetnx(byte[] key, byte[] field, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // HMSET: sets multiple hash fields in one round trip.
    @Override
    public String hmset(final byte[] key, final Map<byte[], byte[]> hash) {
        return d_hmset(key, hash).getResult();
    }
    public OperationResult<String> d_hmset(final byte[] key, final Map<byte[], byte[]> hash) {
        return connPool.executeWithFailover(new BaseKeyOperation<String>(key, OpName.HMSET) {
            @Override
            public String execute(Jedis client, ConnectionContext state) {
                return client.hmset(key, hash);
            }
        });
    }
    // HMGET: returns the values for the given fields; absent fields yield nulls.
    @Override
    public List<byte[]> hmget(final byte[] key, final byte[]... fields) {
        return d_hmget(key, fields).getResult();
    }
    public OperationResult<List<byte[]>> d_hmget(final byte[] key, final byte[]... fields) {
        return connPool.executeWithFailover(new BaseKeyOperation<List<byte[]>>(key, OpName.HMGET) {
            @Override
            public List<byte[]> execute(Jedis client, ConnectionContext state) {
                return client.hmget(key, fields);
            }
        });
    }
    // Hash counter/exists variants are not routed by Dyno yet.
    @Override
    public Long hincrBy(byte[] key, byte[] field, long value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Double hincrByFloat(byte[] key, byte[] field, double value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Boolean hexists(byte[] key, byte[] field) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // HDEL: removes the given fields; returns the number actually removed.
    @Override
    public Long hdel(final byte[] key, final byte[]... fields) {
        return d_hdel(key, fields).getResult();
    }
    public OperationResult<Long> d_hdel(final byte[] key, final byte[]... fields) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HDEL) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.hdel(key, fields);
            }
        });
    }
    // HLEN: number of fields in the hash.
    @Override
    public Long hlen(final byte[] key) {
        return d_hlen(key).getResult();
    }
    public OperationResult<Long> d_hlen(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.HLEN) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.hlen(key);
            }
        });
    }
    // HKEYS: all field names in the hash.
    @Override
    public Set<byte[]> hkeys(final byte[] key) {
        return d_hkeys(key).getResult();
    }
    public OperationResult<Set<byte[]>> d_hkeys(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Set<byte[]>>(key, OpName.HKEYS) {
            @Override
            public Set<byte[]> execute(Jedis client, ConnectionContext state) {
                return client.hkeys(key);
            }
        });
    }
    @Override
    public Collection<byte[]> hvals(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // HGETALL: full field->value snapshot of the hash.
    @Override
    public Map<byte[], byte[]> hgetAll(final byte[] key) {
        return d_hgetAll(key).getResult();
    }
    public OperationResult<Map<byte[], byte[]>> d_hgetAll(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Map<byte[], byte[]>>(key, OpName.HGETALL) {
            @Override
            public Map<byte[], byte[]> execute(Jedis client, ConnectionContext state) throws DynoException {
                return client.hgetAll(key);
            }
        });
    }
    // Binary list commands are not routed by Dyno yet.
    @Override
    public Long rpush(byte[] key, byte[]... args) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long lpush(byte[] key, byte[]... args) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long llen(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> lrange(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String ltrim(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] lindex(byte[] key, long index) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public String lset(byte[] key, long index, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long lrem(byte[] key, long count, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] lpop(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] rpop(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // SADD: adds members to the set; returns the number of new members added.
    @Override
    public Long sadd(final byte[] key, final byte[]... members) {
        return d_sadd(key, members).getResult();
    }
    public OperationResult<Long> d_sadd(final byte[] key, final byte[]... members) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SADD) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.sadd(key, members);
            }
        });
    }
    // SMEMBERS: all members of the set.
    @Override
    public Set<byte[]> smembers(final byte[] key) {
        return d_smembers(key).getResult();
    }
    public OperationResult<Set<byte[]>> d_smembers(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Set<byte[]>>(key, OpName.SMEMBERS) {
            @Override
            public Set<byte[]> execute(Jedis client, ConnectionContext state) {
                return client.smembers(key);
            }
        });
    }
    // SREM: removes members; returns the number actually removed.
    @Override
    public Long srem(final byte[] key, final byte[]... members) {
        return d_srem(key, members).getResult();
    }
    public OperationResult<Long> d_srem(final byte[] key, final byte[]... members) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SREM) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.srem(key, members);
            }
        });
    }
    // SPOP: removes and returns a random member (null if the set is empty/absent).
    @Override
    public byte[] spop(final byte[] key) {
        return d_spop(key).getResult();
    }
    public OperationResult<byte[]> d_spop(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<byte[]>(key, OpName.SPOP) {
            @Override
            public byte[] execute(Jedis client, ConnectionContext state) {
                return client.spop(key);
            }
        });
    }
    @Override
    public Set<byte[]> spop(byte[] key, long count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // SCARD: cardinality of the set.
    @Override
    public Long scard(final byte[] key) {
        return d_scard(key).getResult();
    }
    public OperationResult<Long> d_scard(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.SCARD) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.scard(key);
            }
        });
    }
    @Override
    public Boolean sismember(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public byte[] srandmember(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> srandmember(final byte[] key, final int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // STRLEN: length of the string value at key (0 when absent).
    @Override
    public Long strlen(final byte[] key) {
        return d_strlen(key).getResult();
    }
    public OperationResult<Long> d_strlen(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.STRLEN) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.strlen(key);
            }
        });
    }
    // Most binary sorted-set mutators/readers are not routed by Dyno yet.
    @Override
    public Long zadd(byte[] key, double score, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zadd(byte[] key, Map<byte[], Double> scoreMembers) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrange(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zrem(byte[] key, byte[]... member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Double zincrby(byte[] key, double score, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zrank(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zrevrank(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrevrange(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrangeWithScores(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrevrangeWithScores(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    // ZCARD: number of members in the sorted set.
    @Override
    public Long zcard(final byte[] key) {
        return d_zcard(key).getResult();
    }
    public OperationResult<Long> d_zcard(final byte[] key) {
        return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.ZCARD) {
            @Override
            public Long execute(Jedis client, ConnectionContext state) {
                return client.zcard(key);
            }
        });
    }
    // ZSCORE: the member's score, or null when absent.
    @Override
    public Double zscore(final byte[] key, final byte[] member) {
        return d_zscore(key, member).getResult();
    }
    public OperationResult<Double> d_zscore(final byte[] key, final byte[] member) {
        return connPool.executeWithFailover(new BaseKeyOperation<Double>(key, OpName.ZSCORE) {
            @Override
            public Double execute(Jedis client, ConnectionContext state) {
                return client.zscore(key, member);
            }
        });
    }
    // The following binary sort/range-by-score/range-by-lex commands are not routed
    // by Dyno yet and fail fast with UnsupportedOperationException.
    @Override
    public List<byte[]> sort(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public List<byte[]> sort(byte[] key, SortingParams sortingParameters) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zcount(byte[] key, double min, double max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zcount(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrangeByScore(byte[] key, double min, double max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrangeByScore(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrevrangeByScore(byte[] key, double max, double min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrangeByScore(byte[] key, double min, double max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrevrangeByScore(byte[] key, byte[] max, byte[] min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrangeByScore(byte[] key, byte[] min, byte[] max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrevrangeByScore(byte[] key, double max, double min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(byte[] key, double min, double max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(byte[] key, double max, double min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(byte[] key, double min, double max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrevrangeByScore(byte[] key, byte[] max, byte[] min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(byte[] key, byte[] max, byte[] min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(byte[] key, byte[] min, byte[] max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(byte[] key, double max, double min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(byte[] key, byte[] max, byte[] min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zremrangeByRank(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zremrangeByScore(byte[] key, double start, double end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zremrangeByScore(byte[] key, byte[] start, byte[] end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Long zlexcount(final byte[] key, final byte[] min, final byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Set<byte[]> zrangeByLex(final byte[] key, final byte[] min, final byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
@Override
public Set<byte[]> zrangeByLex(final byte[] key, final byte[] min, final byte[] max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Set<byte[]> zrevrangeByLex(final byte[] key, final byte[] max, final byte[] min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Set<byte[]> zrevrangeByLex(final byte[] key, final byte[] max, final byte[] min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long zremrangeByLex(final byte[] key, final byte[] min, final byte[] max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long linsert(byte[] key, ListPosition where, byte[] pivot, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long lpushx(byte[] key, byte[]... arg) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long rpushx(byte[] key, byte[]... arg) {
throw new UnsupportedOperationException("not yet implemented");
}
/**
 * Deletes the given key (binary variant of Redis DEL).
 *
 * @return number of keys removed
 */
@Override
public Long del(final byte[] key) {
    return d_del(key).getResult();
}

/**
 * DEL with the full {@link OperationResult} (node, latency, attempts) exposed,
 * executed through the connection pool with failover.
 */
public OperationResult<Long> d_del(final byte[] key) {
    return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.DEL) {
        @Override
        public Long execute(Jedis client, ConnectionContext state) {
            return client.del(key);
        }
    });
}
/**
 * Unlinks (asynchronously deletes) the given key (binary variant of Redis UNLINK).
 *
 * @return number of keys unlinked
 */
@Override
public Long unlink(byte[] key) {
    return d_unlink(key).getResult();
}

/**
 * UNLINK with the full {@link OperationResult} exposed, executed through the
 * connection pool with failover.
 */
public OperationResult<Long> d_unlink(final byte[] key) {
    return connPool.executeWithFailover(new BaseKeyOperation<Long>(key, OpName.UNLINK) {
        @Override
        public Long execute(Jedis client, ConnectionContext state) {
            return client.unlink(key);
        }
    });
}
// More unsupported binary-command variants; each throws when invoked.
@Override
public byte[] echo(byte[] arg) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public Long move(byte[] key, int dbIndex) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public Long bitcount(final byte[] key) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public Long bitcount(final byte[] key, long start, long end) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public Long pfadd(final byte[] key, final byte[]... elements) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public long pfcount(final byte[] key) {
    throw new UnsupportedOperationException("not yet implemented");
}
/**
 * NOT SUPPORTED ! Use {@link #dyno_scan(CursorBasedResult, int, String...)}
 * instead.
 *
 * @param cursor
 * @return nothing -- throws UnsupportedOperationException when invoked
 * @see #dyno_scan(CursorBasedResult, int, String...)
 */
@Override
public ScanResult<String> scan(String cursor) {
    // A single-cursor SCAN cannot span a sharded Dynomite cluster; the
    // message now names the actual scatter/gather replacement (the previous
    // message referenced a nonexistent overload with an unbalanced paren).
    throw new UnsupportedOperationException("Not supported - use dyno_scan(CursorBasedResult, int, String...)");
}
/**
 * Cluster-wide SCAN with a default per-node page size of 10.
 */
public CursorBasedResult<String> dyno_scan(String... pattern) {
    return this.dyno_scan(10, pattern);
}

/**
 * Cluster-wide SCAN starting a fresh iteration (no prior cursor).
 */
public CursorBasedResult<String> dyno_scan(int count, String... pattern) {
    return this.dyno_scan(null, count, pattern);
}

/**
 * Cluster-wide SCAN: fans one SCAN page out to the nodes tracked by the
 * cursor and aggregates the per-node results keyed by host address.
 *
 * @param cursor result of a previous call, or null to start a new scan
 * @param count  per-node COUNT hint
 * @param pattern optional MATCH pattern(s)
 */
public CursorBasedResult<String> dyno_scan(CursorBasedResult<String> cursor, int count, String... pattern) {
    if (cursor == null) {
        // Create a temporary cursor context which will maintain a map of token to rack
        cursor = new CursorBasedResultImpl<>(new LinkedHashMap<String, ScanResult<String>>());
    }
    final Map<String, ScanResult<String>> results = new LinkedHashMap<>();
    List<OperationResult<ScanResult<String>>> opResults = scatterGatherScan(cursor, count, pattern);
    for (OperationResult<ScanResult<String>> opResult : opResults) {
        results.put(opResult.getNode().getHostAddress(), opResult.getResult());
    }
    // Carry the token->rack mapping forward so the next page hits the same racks.
    return new CursorBasedResultImpl<>(results, ((TokenRackMapper) cursor).getTokenRackMap());
}
// Unsupported multi-key String commands; each throws when invoked.
@Override
public String pfmerge(String destkey, String... sourcekeys) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public long pfcount(String... keys) {
    throw new UnsupportedOperationException("not yet implemented");
}

@Override
public Long touch(String... keys) {
    throw new UnsupportedOperationException("not yet implemented");
}
/**
 * A usable hashtag is exactly two characters (e.g. "{}") that will bracket
 * expire-hash keys.
 */
private boolean validHashtag(final String hashtag) {
    // length() == 2 already rules out the empty string, so a plain null
    // check is equivalent to Strings.isNullOrEmpty here.
    return hashtag != null && hashtag.length() == 2;
}
/**
 * Builds the data-hash key for an expire-hash by wrapping {@code key} inside
 * the two configured hashtag characters, e.g. "{" + key + "}".
 *
 * @throws IllegalStateException when no (valid) hashtag is configured
 */
private String ehashDataKey(String key) {
    String hashtag = connPool.getConfiguration().getHashtag();
    if (!validHashtag(hashtag)) {
        throw new IllegalStateException("hashtags not set");
    }
    // validHashtag guarantees exactly two characters, so this is equivalent
    // to inserting the key between them.
    return hashtag.charAt(0) + key + hashtag.charAt(1);
}
/**
 * Builds the metadata (TTL zset) key for an expire-hash:
 * {@code PREFIX + tag[0] + key + tag[1]} — the key wrapped in the configured
 * two-character hashtag, prefixed with DYNO_EXIPREHASH_METADATA_KEYPREFIX.
 * Sharing the hashtagged key with the data key presumably routes both to the
 * same node — TODO confirm against the token-aware routing.
 */
@VisibleForTesting
String ehashMetadataKey(String key) {
    final String hashtag = connPool.getConfiguration().getHashtag();
    if (!validHashtag(hashtag)) {
        throw new IllegalStateException("hashtags not set");
    }
    return new StringBuilder(hashtag)
            // prepend the prefix; afterwards index (prefix.length() + 1)
            // falls between the two hashtag characters
            .insert(0, DYNO_EXIPREHASH_METADATA_KEYPREFIX)
            .insert(DYNO_EXIPREHASH_METADATA_KEYPREFIX.length() + 1, key)
            .toString();
}
private double timeInEpochSeconds(long ttl) {
final long timeSinceEpoch = System.currentTimeMillis() / 1000L;
return timeSinceEpoch + ttl;
}
/**
 * Snapshot of one lazy-purge pass over an expire-hash: the keys involved,
 * which fields were found expired, and the (possibly null) pipeline
 * responses for removing them from the data hash and the metadata zset.
 */
@AllArgsConstructor
@Getter
private class EHMetadataUpdateResult {
    private final String dataKey;
    private final String metadataKey;
    // fields whose expiry score was <= "now" when the purge was queued
    private final Set<String> expiredFields;
    // null when no field had expired (nothing was queued on the pipeline)
    private final Response<Long> hdelResponse;
    private final Response<Long> zremResponse;
}
/**
 * Queues deletion of all expired fields of {@code key} on {@code pipeline}.
 * A field is expired when its score (expiry time, epoch seconds) in the
 * metadata zset is <= now. When nothing has expired, nothing is queued.
 * The caller is responsible for sync()-ing (or discarding) the pipeline.
 */
private EHMetadataUpdateResult ehPurgeExpiredFields(DynoJedisPipeline pipeline, String key) {
    final String metadataKey = ehashMetadataKey(key);
    final String dataKey = ehashDataKey(key);
    final double now = timeInEpochSeconds(0);
    // get expired fields
    final Set<String> expiredFields = this.zrangeByScore(metadataKey, 0, now);
    Response<Long> hdelResponse = null;
    Response<Long> zremResponse = null;
    if (expiredFields.size() > 0) {
        // remove expired fields from both the data hash and the metadata zset
        hdelResponse = pipeline.hdel(dataKey, expiredFields.toArray(new String[0]));
        zremResponse = pipeline.zremrangeByScore(metadataKey, 0, now);
    }
    return new EHMetadataUpdateResult(dataKey, metadataKey, expiredFields, hdelResponse, zremResponse);
}
/**
 * Logs (debug level) any mismatch between the number of fields found expired
 * and the number actually deleted from the data hash / metadata zset.
 * Mismatches are only logged, never thrown: each caller decides whether the
 * inconsistency affects its specific request.
 */
private void ehVerifyMetadataUpdate(EHMetadataUpdateResult ehMetadataUpdateResult) {
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get())) {
        // If requested field is not in in the expired fields list, correctness of this request is not affected.
        Logger.debug("Expire hash:{} inconsistent with metadata:{}. Failed to delete expired fields from hash",
                ehMetadataUpdateResult.dataKey, ehMetadataUpdateResult.metadataKey);
    }
    // if fields were not deleted from metadata, correctness is not affected.
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.zremResponse != null &&
            ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.zremResponse.get()) {
        Logger.debug("Expire hash:{} inconsistent with metadata:{}. Failed to delete expired fields from metadata",
                ehMetadataUpdateResult.dataKey, ehMetadataUpdateResult.metadataKey);
    }
}
/**
 * Sets {@code field=value} on the expire-hash {@code key} with a per-field
 * TTL: the absolute expiry time is stored as the field's score in the
 * companion metadata zset, the value in a regular hash, both via one pipeline.
 *
 * @return the HSET result
 */
@Override
public Long ehset(final String key, final String field, final String value, final long ttl)
        throws UnsupportedOperationException, DynoException {
    final DynoJedisPipeline pipeline = this.pipelined();
    // NOTE(review): unlike ehsetnx, the zadd response is never checked here —
    // confirm a silent metadata write failure is acceptable.
    final Response<Long> zResponse = pipeline.zadd(ehashMetadataKey(key), timeInEpochSeconds(ttl), field,
            ZAddParams.zAddParams().ch());
    final Response<Long> hResponse = pipeline.hset(ehashDataKey(key), field, value);
    pipeline.sync();
    return hResponse.get();
}
/**
 * HSETNX on an expire-hash: sets {@code field=value} with TTL only if the
 * field does not already exist. If the metadata zadd and the data hsetnx
 * disagree (one applied, the other didn't), both writes are rolled back and
 * a DynoException is thrown to keep data and metadata consistent.
 *
 * @return the HSETNX result (1 set, 0 field already existed)
 */
public Long ehsetnx(final String key, final String field, final String value, final long ttl) {
    final String ehashDataKey = ehashDataKey(key);
    final String ehashMetadataKey = ehashMetadataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    final Response<Long> zResponse = pipeline.zadd(ehashMetadataKey, timeInEpochSeconds(ttl), field,
            ZAddParams.zAddParams().ch());
    final Response<Long> hResponse = pipeline.hsetnx(ehashDataKey, field, value);
    pipeline.sync();
    // If metadata operation failed, remove the data and throw exception
    if (!zResponse.get().equals(hResponse.get())) {
        d_hdel(ehashDataKey, field);
        d_zrem(ehashMetadataKey, field);
        throw new DynoException("Metadata inconsistent with data for expireHash: " + ehashDataKey);
    }
    return hResponse.get();
}
/**
 * Gets the value of {@code field} from the expire-hash {@code key}, lazily
 * purging expired fields first in the same pipeline.
 *
 * @throws DynoException if the requested field had expired but could not be
 *         removed from the data hash (its value would be stale)
 */
@Override
public String ehget(final String key, final String field)
        throws UnsupportedOperationException, DynoException {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<String> getResponse = pipeline.hget(dataKey, field);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // Return failure if the requested field was expired and was not removed from the data
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get()) &&
            ehMetadataUpdateResult.expiredFields.contains(field)) {
        throw new DynoException("Failed to update expire hash metadata");
    }
    return getResponse.get();
}
/**
 * Deletes fields from both the expire-hash data hash and its metadata zset.
 * A count mismatch between the two is logged (not thrown) since the data
 * deletion — the value callers observe — already succeeded.
 *
 * @return number of fields removed from the data hash
 */
@Override
public Long ehdel(final String key, final String... fields) {
    final DynoJedisPipeline pipeline = this.pipelined();
    final Response<Long> zResponse = pipeline.zrem(ehashMetadataKey(key), fields);
    final Response<Long> hResponse = pipeline.hdel(ehashDataKey(key), fields);
    pipeline.sync();
    if (zResponse.get().compareTo(hResponse.get()) != 0) {
        Logger.error("Operation: {} - data: {} and metadata: {} field count mismatch",
                OpName.EHDEL, hResponse.get(), zResponse.get());
    }
    return hResponse.get();
}
/**
 * HEXISTS on an expire-hash, after lazily purging expired fields.
 *
 * @throws DynoException if the requested field had expired but could not be
 *         removed from the data hash (the answer would be stale)
 */
@Override
public Boolean ehexists(final String key, final String field) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Boolean> existsResponse = pipeline.hexists(dataKey, field);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // Return failure if the requested field was expired and was not removed from the data
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get()) &&
            ehMetadataUpdateResult.expiredFields.contains(field)) {
        throw new DynoException("Failed to update expire hash metadata");
    }
    return existsResponse.get();
}
/**
 * HGETALL on an expire-hash, after lazily purging expired fields.
 *
 * @throws DynoException if any expired field could not be removed from the
 *         data hash — the full result would then contain stale entries
 */
@Override
public Map<String, String> ehgetall(final String key) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Map<String, String>> getallResponse = pipeline.hgetAll(dataKey);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get())) {
        throw new DynoException("Failed to expire hash fields");
    }
    return getallResponse.get();
}
/**
 * HKEYS on an expire-hash, after lazily purging expired fields.
 *
 * @throws DynoException if any expired field could not be removed from the
 *         data hash — the key set would then contain stale entries
 */
@Override
public Set<String> ehkeys(final String key) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Set<String>> getkeysResponse = pipeline.hkeys(dataKey);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get())) {
        throw new DynoException("Failed to expire hash fields");
    }
    return getkeysResponse.get();
}
/**
 * HVALS on an expire-hash, after lazily purging expired fields.
 *
 * @throws DynoException if any expired field could not be removed from the
 *         data hash — the value list would then contain stale entries
 */
@Override
public List<String> ehvals(final String key) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<List<String>> getvalsResponse = pipeline.hvals(dataKey);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get())) {
        throw new DynoException("Failed to expire hash fields");
    }
    return getvalsResponse.get();
}
/**
 * HMGET on an expire-hash, after lazily purging expired fields.
 *
 * @throws DynoException only if purge failed AND one of the requested fields
 *         was among the expired ones (its returned value would be stale)
 */
@Override
public List<String> ehmget(final String key, final String... fields) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<List<String>> mgetResponse = pipeline.hmget(dataKey, fields);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys and expired keys contains one of requested fields, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get()) &&
            Arrays.stream(fields).anyMatch(ehMetadataUpdateResult.expiredFields::contains)) {
        throw new DynoException("Failed to expire hash fields");
    }
    return mgetResponse.get();
}
/**
 * HMSET on an expire-hash: each entry maps a field to (value, ttl-seconds).
 * Values go into the data hash, absolute expiry times into the metadata zset,
 * both in one pipeline.
 *
 * @return the HMSET status reply
 */
@Override
public String ehmset(final String key, final Map<String, Pair<String, Long>> hash) {
    final DynoJedisPipeline pipeline = this.pipelined();
    Map<String, String> fields = new HashMap<>();
    Map<String, Double> metadataFields = new HashMap<>();
    // split each (value, ttl) pair into the data map and the expiry-score map
    hash.keySet().forEach(f -> {
        fields.put(f, hash.get(f).getLeft());
        metadataFields.put(f, timeInEpochSeconds(hash.get(f).getRight()));
    });
    // NOTE(review): the zadd response is never checked — confirm a silent
    // metadata write failure is acceptable (ehsetnx does verify).
    final Response<Long> zResponse = pipeline.zadd(ehashMetadataKey(key), metadataFields,
            ZAddParams.zAddParams().ch());
    final Response<String> hResponse = pipeline.hmset(ehashDataKey(key), fields);
    pipeline.sync();
    return hResponse.get();
}
/**
 * HSCAN over an expire-hash's data hash, after lazily purging expired fields.
 * When nothing was expired the pipeline was never written to, so it is
 * discarded (connection released) rather than sync()-ed.
 *
 * @throws DynoException if expired fields could not be removed from the data
 *         hash — the scan would otherwise return stale entries
 */
@Override
public ScanResult<Map.Entry<String, String>> ehscan(final String key, final String cursor) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    if (ehMetadataUpdateResult.expiredFields.size() > 0) {
        pipeline.sync();
    } else {
        pipeline.discardPipelineAndReleaseConnection();
    }
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get())) {
        throw new DynoException("Failed to expire hash fields");
    }
    return hscan(dataKey, cursor);
}
/**
 * HINCRBY on an expire-hash field, after lazily purging expired fields.
 *
 * @throws DynoException if the incremented field had expired but could not be
 *         removed first (the increment would build on a stale value)
 */
@Override
public Long ehincrby(final String key, final String field, final long value) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Long> incrbyResponse = pipeline.hincrBy(dataKey, field, value);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys and expired keys contains requested field, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get()) &&
            ehMetadataUpdateResult.expiredFields.contains(field)) {
        throw new DynoException("Failed to expire hash fields");
    }
    return incrbyResponse.get();
}
/**
 * HINCRBYFLOAT on an expire-hash field, after lazily purging expired fields.
 *
 * @throws DynoException if the incremented field had expired but could not be
 *         removed first (the increment would build on a stale value)
 */
@Override
public Double ehincrbyfloat(final String key, final String field, final double value) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Double> incrbyFloatResponse = pipeline.hincrByFloat(dataKey, field, value);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys and expired keys contains requested field, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get()) &&
            ehMetadataUpdateResult.expiredFields.contains(field)) {
        throw new DynoException("Failed to expire hash fields");
    }
    return incrbyFloatResponse.get();
}
/**
 * HLEN on an expire-hash, after lazily purging expired fields.
 *
 * @throws DynoException if any expired field could not be removed — the
 *         count would then include stale fields
 */
@Override
public Long ehlen(final String key) {
    final String dataKey = ehashDataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Long> hlenResponse = pipeline.hlen(dataKey);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get())) {
        throw new DynoException("Failed to expire hash fields");
    }
    return hlenResponse.get();
}
/**
 * Renames an expire-hash by renaming both its metadata and data keys in one
 * pipeline. If the metadata rename did not return OK, the data rename is
 * reverted and a DynoException thrown so the two keys stay paired.
 * NOTE(review): the revert itself is unchecked — if it also fails the pair
 * is left inconsistent; confirm this is acceptable.
 *
 * @return the data-key rename status reply
 */
@Override
public String ehrename(final String oldKey, final String newKey) {
    final String dataOldKey = ehashDataKey(oldKey);
    final String dataNewKey = ehashDataKey(newKey);
    final String metadataOldKey = ehashMetadataKey(oldKey);
    final String metadataNewKey = ehashMetadataKey(newKey);
    final DynoJedisPipeline pipeline = this.pipelined();
    final Response<String> zrenameResponse = pipeline.rename(metadataOldKey, metadataNewKey);
    final Response<String> hrenameResponse = pipeline.rename(dataOldKey, dataNewKey);
    pipeline.sync();
    if (zrenameResponse.get().compareTo("OK") != 0) {
        // metadata rename failed: undo the data rename
        rename(dataNewKey, dataOldKey);
        throw new DynoException("Unable to rename key: " + metadataOldKey + " to key:" + metadataNewKey);
    }
    return hrenameResponse.get();
}
/**
 * RENAMENX for an expire-hash: renames metadata and data keys only if the
 * targets don't exist. If the data rename succeeded while the metadata one
 * did not, the data rename is reverted and a DynoException thrown.
 *
 * @return the data-key RENAMENX result (1 renamed, 0 target existed)
 */
@Override
public Long ehrenamenx(final String oldKey, final String newKey) {
    final String dataOldKey = ehashDataKey(oldKey);
    final String dataNewKey = ehashDataKey(newKey);
    final String metadataOldKey = ehashMetadataKey(oldKey);
    final String metadataNewKey = ehashMetadataKey(newKey);
    final DynoJedisPipeline pipeline = this.pipelined();
    final Response<Long> zrenamenxResponse = pipeline.renamenx(metadataOldKey, metadataNewKey);
    final Response<Long> hrenamenxResponse = pipeline.renamenx(dataOldKey, dataNewKey);
    pipeline.sync();
    if (zrenamenxResponse.get() != 1 && hrenamenxResponse.get() == 1) {
        // only the data key was renamed: undo it to keep the pair consistent
        rename(dataNewKey, dataOldKey);
        throw new DynoException("Unable to rename key: " + metadataOldKey + " to key:" + metadataNewKey);
    }
    return hrenamenxResponse.get();
}
/**
 * EXPIRE for a whole expire-hash: applies the same timeout to both the
 * metadata and data keys; throws if the two results disagree.
 *
 * @return the data-key EXPIRE result (1 set, 0 key missing)
 */
@Override
public Long ehexpire(final String key, final int seconds) {
    final String dataKey = ehashDataKey(key);
    final String metadataKey = ehashMetadataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    Response<Long> metadataExpireResponse = pipeline.expire(metadataKey, seconds);
    Response<Long> dataExpireResponse = pipeline.expire(dataKey, seconds);
    pipeline.sync();
    if (metadataExpireResponse.get().compareTo(dataExpireResponse.get()) != 0) {
        throw new DynoException("Metadata and data timeout do not match");
    }
    return dataExpireResponse.get();
}
/**
 * EXPIREAT for a whole expire-hash: same absolute (seconds) deadline for
 * both metadata and data keys; throws if the two results disagree.
 *
 * @return the data-key EXPIREAT result
 */
@Override
public Long ehexpireat(final String key, final long timestamp) {
    final String dataKey = ehashDataKey(key);
    final String metadataKey = ehashMetadataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    Response<Long> metadataExpireResponse = pipeline.expireAt(metadataKey, timestamp);
    Response<Long> dataExpireResponse = pipeline.expireAt(dataKey, timestamp);
    pipeline.sync();
    if (metadataExpireResponse.get().compareTo(dataExpireResponse.get()) != 0) {
        throw new DynoException("Metadata and data timeout do not match");
    }
    return dataExpireResponse.get();
}
/**
 * PEXPIREAT for a whole expire-hash: same absolute (milliseconds) deadline
 * for both metadata and data keys; throws if the two results disagree.
 *
 * @return the data-key PEXPIREAT result
 */
@Override
public Long ehpexpireat(final String key, final long timestamp) {
    final String dataKey = ehashDataKey(key);
    final String metadataKey = ehashMetadataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    Response<Long> metadataExpireResponse = pipeline.pexpireAt(metadataKey, timestamp);
    Response<Long> dataExpireResponse = pipeline.pexpireAt(dataKey, timestamp);
    pipeline.sync();
    if (metadataExpireResponse.get().compareTo(dataExpireResponse.get()) != 0) {
        throw new DynoException("Metadata and data timeout do not match");
    }
    return dataExpireResponse.get();
}
/**
 * PERSIST for a whole expire-hash: removes the key-level timeout from both
 * metadata and data keys; throws if the two results disagree. Per-field TTLs
 * in the metadata zset are unaffected.
 *
 * @return the data-key PERSIST result
 */
@Override
public Long ehpersist(final String key) {
    final String dataKey = ehashDataKey(key);
    final String metadataKey = ehashMetadataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    Response<Long> metadataPersistResponse = pipeline.persist(metadataKey);
    Response<Long> dataPersistResponse = pipeline.persist(dataKey);
    pipeline.sync();
    if (metadataPersistResponse.get().compareTo(dataPersistResponse.get()) != 0) {
        throw new DynoException("Metadata and data expiry do not match");
    }
    return dataPersistResponse.get();
}
/**
 * TTL (seconds) of the whole expire-hash — delegated to the TTL of its
 * backing data key.
 */
@Override
public Long ehttl(final String key) {
    final String dataKey = ehashDataKey(key);
    return ttl(dataKey);
}
/**
 * Remaining TTL (in seconds) of one field of an expire-hash, computed from
 * its expiry score in the metadata zset after lazily purging expired fields.
 * Non-positive scores are returned as-is (truncated to long).
 * NOTE(review): if the field is absent, zscore yields null and the unboxed
 * comparison below will NPE — confirm callers guarantee the field exists.
 *
 * @throws DynoException if the field had expired but could not be purged
 */
@Override
public Long ehttl(final String key, final String field) {
    double now = timeInEpochSeconds(0);
    final String metadataKey = ehashMetadataKey(key);
    final DynoJedisPipeline pipeline = this.pipelined();
    EHMetadataUpdateResult ehMetadataUpdateResult = ehPurgeExpiredFields(pipeline, key);
    final Response<Double> zscoreResponse = pipeline.zscore(metadataKey, field);
    pipeline.sync();
    // verify if all expired fields were removed from data and metadata
    ehVerifyMetadataUpdate(ehMetadataUpdateResult);
    // on failure to remove all expired keys and expired keys contains requested field, fail
    if (ehMetadataUpdateResult.expiredFields.size() > 0 && ehMetadataUpdateResult.hdelResponse != null &&
            (ehMetadataUpdateResult.expiredFields.size() != ehMetadataUpdateResult.hdelResponse.get()) &&
            ehMetadataUpdateResult.expiredFields.contains(field)) {
        throw new DynoException("Failed to expire hash fields");
    }
    if (zscoreResponse.get() > 0) {
        // score is an absolute expiry time; convert back to a relative TTL
        return zscoreResponse.get().longValue() - (long) now;
    } else {
        return zscoreResponse.get().longValue();
    }
}
/**
 * TTL in milliseconds of the whole expire-hash — delegated to the PTTL of
 * its backing data key.
 */
@Override
public Long ehpttl(final String key) {
    final String dataKey = ehashDataKey(key);
    return pttl(dataKey);
}
@Override
public Long ehpttl(final String key, final String field) {
    // NOTE(review): delegates to ehttl, so the returned value is in SECONDS
    // even though the "p" prefix suggests milliseconds — confirm the
    // intended units (field expiry scores are stored at second resolution).
    return ehttl(key, field);
}
/**
 * Stops the pipeline monitor (if one was ever started) and shuts down the
 * underlying connection pool.
 */
public void stopClient() {
    final DynoJedisPipelineMonitor monitor = pipelineMonitor.get();
    if (monitor != null) {
        monitor.stop();
    }
    this.connPool.shutdown();
}
/**
 * Creates a new pipeline bound to this client's connection pool; the shared
 * pipeline monitor is lazily initialized on first use.
 */
public DynoJedisPipeline pipelined() {
    return new DynoJedisPipeline(getConnPool(), checkAndInitPipelineMonitor(), getConnPool().getMonitor());
}
/**
 * Lazily creates the singleton pipeline monitor. A compareAndSet publishes
 * exactly one instance; only the winning thread calls init(), and every
 * caller reads the published value back from the atomic reference.
 */
private DynoJedisPipelineMonitor checkAndInitPipelineMonitor() {
    if (pipelineMonitor.get() != null) {
        return pipelineMonitor.get();
    }
    int flushTimerFrequency = this.connPool.getConfiguration().getTimingCountersResetFrequencySeconds();
    DynoJedisPipelineMonitor plMonitor = new DynoJedisPipelineMonitor(appName, flushTimerFrequency);
    boolean success = pipelineMonitor.compareAndSet(null, plMonitor);
    if (success) {
        // we won the race: initialize the instance we just published
        pipelineMonitor.get().init();
    }
    return pipelineMonitor.get();
}
public static class Builder {
private String appName;
private String clusterName;
private ConnectionPoolConfigurationImpl cpConfig;
private HostSupplier hostSupplier;
private EurekaClient discoveryClient;
private String dualWriteClusterName;
private HostSupplier dualWriteHostSupplier;
private DynoDualWriterClient.Dial dualWriteDial;
private ConnectionPoolMonitor cpMonitor;
private SSLSocketFactory sslSocketFactory;
private TokenMapSupplier tokenMapSupplier;
private TokenMapSupplier dualWriteTokenMapSupplier;
private boolean isDatastoreClient;
private String connectionPoolConsistency;
public Builder() {
}
public Builder withApplicationName(String applicationName) {
appName = applicationName;
return this;
}
public Builder withDynomiteClusterName(String cluster) {
clusterName = cluster;
return this;
}
public Builder withCPConfig(ConnectionPoolConfigurationImpl config) {
cpConfig = config;
return this;
}
public Builder withHostSupplier(HostSupplier hSupplier) {
hostSupplier = hSupplier;
return this;
}
public Builder withTokenMapSupplier(TokenMapSupplier tokenMapSupplier) {
this.tokenMapSupplier = tokenMapSupplier;
return this;
}
@Deprecated
public Builder withDiscoveryClient(DiscoveryClient client) {
discoveryClient = client;
return this;
}
public Builder withDiscoveryClient(EurekaClient client) {
discoveryClient = client;
return this;
}
public Builder withDualWriteClusterName(String dualWriteCluster) {
dualWriteClusterName = dualWriteCluster;
return this;
}
public Builder withDualWriteHostSupplier(HostSupplier dualWriteHostSupplier) {
this.dualWriteHostSupplier = dualWriteHostSupplier;
return this;
}
public Builder withDualWriteTokenMapSupplier(TokenMapSupplier dualWriteTokenMapSupplier) {
this.dualWriteTokenMapSupplier = dualWriteTokenMapSupplier;
return this;
}
public Builder withDualWriteDial(DynoDualWriterClient.Dial dial) {
this.dualWriteDial = dial;
return this;
}
public Builder withConnectionPoolMonitor(ConnectionPoolMonitor cpMonitor) {
this.cpMonitor = cpMonitor;
return this;
}
public Builder withSSLSocketFactory(SSLSocketFactory sslSocketFactory) {
this.sslSocketFactory = sslSocketFactory;
return this;
}
public Builder isDatastoreClient(boolean isDatastoreClient) {
this.isDatastoreClient = isDatastoreClient;
return this;
}
public Builder withConnectionPoolConsistency(String consistency) {
this.connectionPoolConsistency = consistency;
return this;
}
public DynoJedisClient build() {
assert (appName != null);
assert (clusterName != null);
// Make sure that the user doesn't set isDatastoreClient and connectionPoolConsistency together.
if (this.isDatastoreClient == true && this.connectionPoolConsistency != null) {
throw new DynoException("Cannot set isDatastoreClient(true) and also set withConnectionPoolConsistency() together");
}
ArchaiusConnectionPoolConfiguration archaiusConfig = new ArchaiusConnectionPoolConfiguration(appName);
if (cpConfig == null) {
cpConfig = archaiusConfig;
Logger.info("Dyno Client runtime properties: " + cpConfig.toString());
} else {
// Based on current requirements, we currently only want to prioritize pulling in the following FPs
// if provided:
// 'dualwrite.enabled', 'dualwrite.cluster', 'dualwrite.percentage'
// TODO: Move to a clean generic userconfig + FP model.
if (!cpConfig.isDualWriteEnabled() && archaiusConfig.isDualWriteEnabled()) {
// If a user sets these configs explicitly, they take precedence over the FP values.
if (cpConfig.getDualWriteClusterName() == null) {
cpConfig.setDualWriteClusterName(archaiusConfig.getDualWriteClusterName());
}
if (cpConfig.getDualWritePercentage() == 0) {
cpConfig.setDualWritePercentage(archaiusConfig.getDualWritePercentage());
}
cpConfig.setDualWriteEnabled(true);
}
}
cpConfig.setConnectToDatastore(isDatastoreClient);
// If a connection-pool level consistency setting was provided, add it here.
if (this.connectionPoolConsistency != null) {
cpConfig.setConnectionPoolConsistency(connectionPoolConsistency);
}
if (cpConfig.isDualWriteEnabled()) {
return buildDynoDualWriterClient();
} else {
return buildDynoJedisClient();
}
}
/**
 * Builds a {@link DynoDualWriterClient}: a primary client plus a "shadow"
 * client pointed at the dual-write cluster. Writes are mirrored to the shadow
 * cluster (subject to the dual-write percentage dial).
 */
private DynoDualWriterClient buildDynoDualWriterClient() {
    // Shadow (dual-write) config starts as a copy of the primary pool config.
    ConnectionPoolConfigurationImpl shadowConfig = new ConnectionPoolConfigurationImpl(cpConfig);
    Logger.info("Dyno Client Shadow Config runtime properties: " + shadowConfig.toString());
    // Ensure that if the shadow cluster is down it will not block
    // client application startup.
    shadowConfig.setFailOnStartupIfNoHosts(false);
    // Initialize the host supplier for the shadow cluster: prefer an explicit
    // dualWriteHostSupplier; otherwise derive one from the primary Eureka-based
    // supplier, or build one from a raw discovery client.
    HostSupplier shadowSupplier;
    if (dualWriteHostSupplier == null) {
        if (hostSupplier != null && hostSupplier instanceof EurekaHostsSupplier) {
            EurekaHostsSupplier eurekaSupplier = (EurekaHostsSupplier) hostSupplier;
            shadowSupplier = EurekaHostsSupplier.newInstance(shadowConfig.getDualWriteClusterName(),
                    eurekaSupplier);
        } else if (discoveryClient != null) {
            shadowSupplier = new EurekaHostsSupplier(shadowConfig.getDualWriteClusterName(), discoveryClient);
        } else {
            throw new DynoConnectException("HostSupplier for DualWrite cluster is REQUIRED if you are not "
                    + "using EurekaHostsSupplier implementation or using a EurekaClient");
        }
    } else {
        shadowSupplier = dualWriteHostSupplier;
    }
    shadowConfig.withHostSupplier(shadowSupplier);
    if (dualWriteTokenMapSupplier != null)
        shadowConfig.withTokenSupplier(dualWriteTokenMapSupplier);
    String shadowAppName = shadowConfig.getName();
    // Separate monitors so shadow-cluster metrics do not mix with primary metrics.
    DynoCPMonitor shadowCPMonitor = new DynoCPMonitor(shadowAppName);
    DynoOPMonitor shadowOPMonitor = new DynoOPMonitor(shadowAppName);
    DynoJedisUtils.updateConnectionPoolConfig(shadowConfig, shadowSupplier, dualWriteTokenMapSupplier, discoveryClient, clusterName);
    final ConnectionPool<Jedis> shadowPool = DynoJedisUtils.createConnectionPool(shadowAppName, shadowOPMonitor, shadowCPMonitor, shadowConfig,
            sslSocketFactory);
    // Construct a connection pool with the shadow cluster settings
    DynoJedisClient shadowClient = new DynoJedisClient(shadowAppName, dualWriteClusterName, shadowPool,
            shadowOPMonitor, shadowCPMonitor);
    // Construct an instance of our DualWriter client
    DynoOPMonitor opMonitor = new DynoOPMonitor(appName);
    ConnectionPoolMonitor cpMonitor = (this.cpMonitor == null) ? new DynoCPMonitor(appName) : this.cpMonitor;
    // NOTE(review): the primary pool below is updated with dualWriteHostSupplier /
    // dualWriteTokenMapSupplier rather than hostSupplier / tokenMapSupplier
    // (compare buildDynoJedisClient). Looks suspicious — confirm this is intended.
    DynoJedisUtils.updateConnectionPoolConfig(cpConfig, dualWriteHostSupplier, dualWriteTokenMapSupplier, discoveryClient, clusterName);
    final ConnectionPool<Jedis> pool = DynoJedisUtils.createConnectionPool(appName, opMonitor, cpMonitor, cpConfig, sslSocketFactory);
    // Honor the configured dual-write percentage when a custom dial is supplied.
    if (dualWriteDial != null) {
        if (shadowConfig.getDualWritePercentage() > 0) {
            dualWriteDial.setRange(shadowConfig.getDualWritePercentage());
        }
        return new DynoDualWriterClient(appName, clusterName, pool, opMonitor, cpMonitor, shadowClient,
                dualWriteDial);
    } else {
        return new DynoDualWriterClient(appName, clusterName, pool, opMonitor, cpMonitor, shadowClient);
    }
}
/**
 * Builds a plain (non-dual-write) {@link DynoJedisClient} from the builder's
 * current configuration.
 */
private DynoJedisClient buildDynoJedisClient() {
    // Per-client operation monitor; reuse the caller-supplied connection-pool
    // monitor when one was provided, otherwise create a default one.
    final DynoOPMonitor operationMonitor = new DynoOPMonitor(appName);
    final ConnectionPoolMonitor poolMonitor;
    if (this.cpMonitor == null) {
        poolMonitor = new DynoCPMonitor(appName);
    } else {
        poolMonitor = this.cpMonitor;
    }
    // Resolve host/token suppliers on the config, then create the pool and client.
    DynoJedisUtils.updateConnectionPoolConfig(cpConfig, hostSupplier, tokenMapSupplier, discoveryClient,
            clusterName);
    final ConnectionPool<Jedis> connectionPool = DynoJedisUtils.createConnectionPool(appName, operationMonitor,
            poolMonitor, cpConfig, sslSocketFactory);
    return new DynoJedisClient(appName, clusterName, connectionPool, operationMonitor, poolMonitor);
}
}
/**
* Used for unit testing ONLY
*/
/* package */ static class TestBuilder {
    // Raw ConnectionPool type kept on purpose to match the original test surface.
    private String appName;
    private ConnectionPool pool;

    /** Sets the application name used for the test client. */
    public TestBuilder withAppname(String appName) {
        this.appName = appName;
        return this;
    }

    /** Supplies the (usually mocked) connection pool. */
    public TestBuilder withConnectionPool(ConnectionPool cp) {
        this.pool = cp;
        return this;
    }

    /** Builds a client with no monitors, bound to a fixed "TestCluster". */
    public DynoJedisClient build() {
        return new DynoJedisClient(appName, "TestCluster", pool, null, null);
    }
}
// ---------------------------------------------------------------------------
// The commands below are required by the Jedis command interfaces but are not
// supported by Dynomite/this client; each one throws
// UnsupportedOperationException unconditionally.
// ---------------------------------------------------------------------------
@Override
public ScanResult<String> scan(String arg0, ScanParams arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
// Binary-key GEO commands (unsupported).
@Override
public Long geoadd(byte[] arg0, Map<byte[], GeoCoordinate> arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long geoadd(byte[] arg0, double arg1, double arg2, byte[] arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Double geodist(byte[] arg0, byte[] arg1, byte[] arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Double geodist(byte[] arg0, byte[] arg1, byte[] arg2, GeoUnit arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<byte[]> geohash(byte[] arg0, byte[]... arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoCoordinate> geopos(byte[] arg0, byte[]... arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
// Binary-key GEORADIUS family (unsupported).
@Override
public List<GeoRadiusResponse> georadius(byte[] arg0, double arg1, double arg2, double arg3, GeoUnit arg4) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusReadonly(byte[] key, double longitude, double latitude, double radius, GeoUnit unit) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadius(byte[] arg0, double arg1, double arg2, double arg3, GeoUnit arg4,
        GeoRadiusParam arg5) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusReadonly(byte[] key, double longitude, double latitude, double radius, GeoUnit unit, GeoRadiusParam param) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMember(byte[] arg0, byte[] arg1, double arg2, GeoUnit arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(byte[] key, byte[] member, double radius, GeoUnit unit) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMember(byte[] arg0, byte[] arg1, double arg2, GeoUnit arg3,
        GeoRadiusParam arg4) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(byte[] key, byte[] member, double radius, GeoUnit unit, GeoRadiusParam param) {
    throw new UnsupportedOperationException("not yet implemented");
}
// Binary-key scan / sorted-set / bitfield commands (unsupported).
@Override
public ScanResult<Entry<byte[], byte[]>> hscan(byte[] arg0, byte[] arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<Entry<byte[], byte[]>> hscan(byte[] arg0, byte[] arg1, ScanParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<byte[]> sscan(byte[] arg0, byte[] arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<byte[]> sscan(byte[] arg0, byte[] arg1, ScanParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long zadd(byte[] arg0, Map<byte[], Double> arg1, ZAddParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long zadd(byte[] arg0, double arg1, byte[] arg2, ZAddParams arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Double zincrby(byte[] arg0, double arg1, byte[] arg2, ZIncrByParams arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<Tuple> zscan(byte[] arg0, byte[] arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<Tuple> zscan(byte[] arg0, byte[] arg1, ScanParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<Long> bitfield(byte[] key, byte[]... arguments) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long hstrlen(byte[] key, byte[] field) {
    throw new UnsupportedOperationException("not yet implemented");
}
// String-key BITPOS and GEO commands (unsupported).
@Override
public Long bitpos(String arg0, boolean arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long bitpos(String arg0, boolean arg1, BitPosParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long geoadd(String arg0, Map<String, GeoCoordinate> arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long geoadd(String arg0, double arg1, double arg2, String arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Double geodist(String arg0, String arg1, String arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Double geodist(String arg0, String arg1, String arg2, GeoUnit arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<String> geohash(String arg0, String... arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoCoordinate> geopos(String arg0, String... arg1) {
    throw new UnsupportedOperationException("not yet implemented");
}
// String-key GEORADIUS family (unsupported).
@Override
public List<GeoRadiusResponse> georadius(String arg0, double arg1, double arg2, double arg3, GeoUnit arg4) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusReadonly(String key, double longitude, double latitude, double radius, GeoUnit unit) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadius(String arg0, double arg1, double arg2, double arg3, GeoUnit arg4,
        GeoRadiusParam arg5) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusReadonly(String key, double longitude, double latitude, double radius, GeoUnit unit, GeoRadiusParam param) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMember(String arg0, String arg1, double arg2, GeoUnit arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(String key, String member, double radius, GeoUnit unit) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMember(String arg0, String arg1, double arg2, GeoUnit arg3,
        GeoRadiusParam arg4) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public List<GeoRadiusResponse> georadiusByMemberReadonly(String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
    throw new UnsupportedOperationException("not yet implemented");
}
// String-key bitfield / hash / sorted-set commands (unsupported).
@Override
public List<Long> bitfield(String key, String... arguments) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long hstrlen(String key, String field) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<Entry<String, String>> hscan(String arg0, String arg1, ScanParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Long zadd(String arg0, Map<String, Double> arg1, ZAddParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Double zincrby(String arg0, double arg1, String arg2, ZIncrByParams arg3) {
    throw new UnsupportedOperationException("not yet implemented");
}
@Override
public ScanResult<Tuple> zscan(String arg0, String arg1, ScanParams arg2) {
    throw new UnsupportedOperationException("not yet implemented");
}
}
| 6,123 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoJedisModule.java | package com.netflix.dyno.jedis;
import com.google.inject.AbstractModule;
import com.google.inject.Singleton;
@Singleton
public class DynoJedisModule extends AbstractModule {

    public DynoJedisModule() {
    }

    /**
     * Contributes no bindings; the module exists so DynoJedis can be installed
     * into a Guice injector.
     */
    @Override
    protected void configure() {
    }

    /**
     * All instances of this module are interchangeable, so equality is by exact
     * class. Guice relies on equals/hashCode to de-duplicate installed modules.
     */
    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        return getClass() == obj.getClass();
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}
| 6,124 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/CursorBasedResultImpl.java | /*******************************************************************************
* Copyright 2015 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.CursorBasedResult;
import com.netflix.dyno.connectionpool.TokenRackMapper;
import redis.clients.jedis.ScanResult;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* Encapsulates the results of performing a distributed SCAN operation.
* <p>
* Example usage
* <pre>
* CursorBasedResult<String> cbi = null;
* do {
* cbi = client.dyno_scan(cbi, "regex_pattern");
* .
* .
* .
* } while (!cbi.isComplete());
* </pre>
*/
public class CursorBasedResultImpl<T> implements CursorBasedResult<T>, TokenRackMapper {

    // Per-host scan state: host name -> that host's latest ScanResult (cursor + page).
    private final Map<String, ScanResult<T>> result;
    // Token -> rack mapping used to route subsequent scan iterations.
    private Map<Long, String> tokenRackMap;

    /* package private */ CursorBasedResultImpl(Map<String, ScanResult<T>> result) {
        this(result, new LinkedHashMap<Long, String>());
    }

    /* package private */ CursorBasedResultImpl(Map<String, ScanResult<T>> result, Map<Long, String> tokenRackMap) {
        this.result = result;
        this.tokenRackMap = tokenRackMap;
    }

    /** Flattens the per-host scan pages into a single list. */
    @Override
    public List<T> getResult() {
        final List<T> flattened = new ArrayList<>();
        result.values().forEach(scanResult -> flattened.addAll(scanResult.getResult()));
        return flattened;
    }

    /** Renders each host's cursor as {@code "host -> cursor"} for diagnostics. */
    @Override
    public List<String> getStringResult() {
        final List<String> rendered = new ArrayList<>();
        result.forEach((host, scanResult) ->
                rendered.add(String.format("%s -> %s", host, scanResult.getCursor())));
        return rendered;
    }

    /** Returns the host's current cursor, or "0" (start/done) for unknown hosts. */
    @Override
    public String getCursorForHost(String host) {
        final ScanResult<T> scanResult = result.get(host);
        return (scanResult == null) ? "0" : scanResult.getCursor();
    }

    /** The distributed scan is complete once every host's cursor is back to "0". */
    @Override
    public boolean isComplete() {
        for (ScanResult<T> scanResult : result.values()) {
            if (!scanResult.getCursor().equals("0")) {
                return false;
            }
        }
        return true;
    }

    @Override
    public String getRackForToken(Long token) {
        return tokenRackMap.get(token);
    }

    public void setRackForToken(Long token, String rack) {
        tokenRackMap.put(token, rack);
    }

    @Override
    public Map<Long, String> getTokenRackMap() {
        return this.tokenRackMap;
    }
}
| 6,125 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoJedisCommands.java | /* ******************************************************************************
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* *****************************************************************************/
package com.netflix.dyno.jedis;
import org.apache.commons.lang3.tuple.Pair;
import redis.clients.jedis.ScanResult;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Jedis commands specific to Dynomite client.
*/
public interface DynoJedisCommands {
    /**
     * Sets field in the expire hash stored at key to value with a per-field TTL.
     *
     * @param key expire hash key
     * @param field hash field
     * @param value field value
     * @param ttl time to live, in seconds
     * @return 1 - if field is a new field in the expire hash and value, ttl was set.
     * 0 - if field already exists and the value, ttl was updated.
     */
    Long ehset(String key, String field, String value, long ttl);
    /**
     * Gets the value stored at field in the expire hash with key.
     *
     * @param key expire hash key
     * @param field hash field
     * @return String value, if it exists;
     * null - if the field doesn't exist (or has expired).
     */
    String ehget(String key, String field);
    /**
     * Deletes one or more fields in the expire hash with key.
     *
     * @param key expire hash key
     * @param fields hash fields
     * @return number of fields deleted.
     */
    Long ehdel(String key, String... fields);
    /**
     * Checks whether a field exists in the expire hash with key.
     *
     * @param key expire hash key
     * @param field hash field
     * @return true if the field exists, false otherwise.
     */
    Boolean ehexists(String key, String field);
    /**
     * Gets all fields and their values in the expire hash.
     *
     * @param key expire hash key
     * @return Map of all fields to their values.
     */
    Map<String, String> ehgetall(String key);
    /**
     * Gets all fields in the expire hash.
     *
     * @param key expire hash key
     * @return Set of all fields.
     */
    Set<String> ehkeys(String key);
    /**
     * Gets all values stored in the expire hash.
     *
     * @param key expire hash key
     * @return List of all values stored.
     */
    List<String> ehvals(String key);
    /**
     * Gets multiple fields from the expire hash.
     *
     * @param key expire hash key
     * @param fields hash fields
     * @return List of requested field values, in request order.
     */
    List<String> ehmget(String key, String... fields);
    /**
     * Sets multiple fields in the expire hash.
     *
     * @param key expire hash key
     * @param hash map of field to a (value, TTL-in-seconds) pair
     * @return "OK" if the values were set.
     */
    String ehmset(String key, Map<String, Pair<String, Long>> hash);
    /**
     * Sets a field in the expire hash only if it doesn't exist already.
     *
     * @param key expire hash key
     * @param field hash field
     * @param value field value
     * @param ttl time to live, in seconds
     * @return 1 if the field was set, 0 otherwise.
     */
    Long ehsetnx(String key, String field, String value, long ttl);
    /**
     * Incrementally scans fields in the expire hash.
     *
     * @param key expire hash key
     * @param cursor cursor ("0" to start a new scan)
     * @return one page of field/value entries plus the next cursor.
     */
    ScanResult<Map.Entry<String, String>> ehscan(String key, String cursor);
    /**
     * Increments the integer value stored in the field of the expire hash.
     *
     * @param key expire hash key
     * @param field hash field
     * @param value value to increment by
     * @return value of the field after the increment.
     */
    Long ehincrby(String key, String field, long value);
    /**
     * Increments the value stored in the field by a double value.
     *
     * @param key expire hash key
     * @param field hash field
     * @param value value to increment by
     * @return value of the field after the increment.
     */
    Double ehincrbyfloat(String key, String field, double value);
    /**
     * Returns the number of fields stored in the expire hash.
     *
     * @param key expire hash key
     * @return count of fields in the expire hash.
     */
    Long ehlen(String key);
    /**
     * Renames the expire hash key.
     *
     * @param oldKey old expire hash key
     * @param newKey new expire hash key
     * @return "OK" if the rename was successful.
     */
    String ehrename(String oldKey, String newKey);
    /**
     * Renames the expire hash key only if the new key doesn't already exist.
     *
     * @param oldKey old expire hash key
     * @param newKey new expire hash key
     * @return 1 if the rename was successful, 0 otherwise.
     */
    Long ehrenamenx(String oldKey, String newKey);
    /**
     * Sets an expiry on the whole expire hash.
     *
     * @param key expire hash key
     * @param seconds expiry in seconds from now
     * @return 1 if the timeout was set, 0 otherwise.
     */
    Long ehexpire(String key, int seconds);
    /**
     * Sets an absolute expiry on the expire hash.
     *
     * @param key expire hash key
     * @param timestamp expiry as a unix timestamp (seconds)
     * @return 1 if the timeout was set, 0 otherwise.
     */
    Long ehexpireat(String key, long timestamp);
    /**
     * Sets an absolute expiry on the expire hash with millisecond precision.
     *
     * @param key expire hash key
     * @param timestamp expiry as a unix timestamp (milliseconds)
     * @return 1 if the timeout was set, 0 otherwise.
     */
    Long ehpexpireat(String key, long timestamp);
    /**
     * Removes an existing timeout on the expire hash.
     *
     * @param key expire hash key
     * @return 1 if the timeout was removed, 0 otherwise.
     */
    Long ehpersist(String key);
    /**
     * Returns the remaining time to live of the expire hash, in seconds.
     *
     * @param key expire hash key
     * @return -2 if the key does not exist, -1 if no timeout is set, otherwise the remaining time.
     */
    Long ehttl(String key);
    /**
     * Returns the remaining time to live of an expire hash field, in seconds.
     *
     * @param key expire hash key
     * @param field hash field
     * @return remaining time in seconds if the key and field exist, 0 otherwise.
     */
    Long ehttl(String key, String field);
    /**
     * Returns the remaining time to live of the expire hash, in milliseconds.
     *
     * @param key expire hash key
     * @return -2 if the key does not exist, -1 if no timeout is set, otherwise the remaining time
     * in milliseconds.
     */
    Long ehpttl(String key);
    /**
     * Returns the remaining time to live of an expire hash field, in milliseconds.
     *
     * @param key expire hash key
     * @param field hash field
     * @return remaining time in milliseconds if the key and field exist, 0 otherwise.
     */
    Long ehpttl(String key, String field);
}
| 6,126 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoJedisPipelineMonitor.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
import java.util.Map;
import java.util.concurrent.*;
import com.netflix.dyno.connectionpool.impl.utils.EstimatedHistogram;
import com.netflix.dyno.contrib.EstimatedHistogramBasedCounter.EstimatedHistogramMean;
import com.netflix.dyno.contrib.EstimatedHistogramBasedCounter.EstimatedHistogramPercentile;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.BasicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.tag.BasicTag;
import org.slf4j.LoggerFactory;
/**
 * Servo-based metrics for Dyno pipeline usage: per-operation counters,
 * sync/discard counters, and latency histograms for pipeline sync and send.
 */
public class DynoJedisPipelineMonitor {

    private static final org.slf4j.Logger Logger = LoggerFactory.getLogger(DynoJedisPipelineMonitor.class);

    // One counter per pipeline operation name, created (and registered) on demand.
    private final ConcurrentHashMap<String, BasicCounter> counterMap = new ConcurrentHashMap<String, BasicCounter>();
    private final String appName;
    private final BasicCounter pipelineSync;
    private final BasicCounter pipelineDiscard;
    private final PipelineTimer timer;
    private final PipelineSendTimer sendTimer;
    // When > 0, timing histograms are cleared on this cadence (seconds).
    private final int resetTimingsFrequencyInSeconds;

    // Single thread used only for the periodic histogram reset.
    private final ScheduledExecutorService threadPool = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "DynoJedisPipelineMonitor");
        }
    });

    /**
     * @param applicationName app name used to build metric names
     * @param resetTimingsFrequencyInSeconds how often to reset latency
     *        histograms; 0 (or negative) disables the periodic reset
     */
    public DynoJedisPipelineMonitor(String applicationName, int resetTimingsFrequencyInSeconds) {
        appName = applicationName;
        pipelineSync = getNewPipelineCounter("SYNC");
        pipelineDiscard = getNewPipelineCounter("DISCARD");
        timer = new PipelineTimer(appName);
        sendTimer = new PipelineSendTimer(appName);
        this.resetTimingsFrequencyInSeconds = resetTimingsFrequencyInSeconds;
    }

    public DynoJedisPipelineMonitor(String applicationName) {
        this(applicationName, 0);
    }

    /**
     * Registers counters and sync-latency gauges with the servo registry and,
     * if configured, schedules the periodic reset of timing histograms.
     * Call once after construction.
     */
    public void init() {
        // register the counters
        DefaultMonitorRegistry.getInstance().register(pipelineSync);
        DefaultMonitorRegistry.getInstance().register(pipelineDiscard);
        // register the pipeline timer
        DefaultMonitorRegistry.getInstance().register(timer.latMean);
        DefaultMonitorRegistry.getInstance().register(timer.lat99);
        DefaultMonitorRegistry.getInstance().register(timer.lat995);
        DefaultMonitorRegistry.getInstance().register(timer.lat999);
        // NOTE -- pipeline 'send' timers are created on demand and are registered
        // in PipelineSendTimer.getOrCreateHistogram()
        Logger.debug(String.format("Initializing DynoJedisPipelineMonitor with timing counter reset frequency %d",
                resetTimingsFrequencyInSeconds));
        if (resetTimingsFrequencyInSeconds > 0) {
            threadPool.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    timer.reset();
                    sendTimer.reset();
                }
            }, 1, resetTimingsFrequencyInSeconds, TimeUnit.SECONDS);
        }
    }

    /** Increments the counter for the given pipeline operation name. */
    public void recordOperation(String opName) {
        getOrCreateCounter(opName).increment();
    }

    public void recordPipelineSync() {
        pipelineSync.increment();
    }

    public void recordPipelineDiscard() {
        pipelineDiscard.increment();
    }

    /** Records the latency of a pipeline sync()/syncAndReturnAll() call. */
    public void recordLatency(long duration, TimeUnit unit) {
        timer.recordLatency(duration, unit);
    }

    /** Records the time taken to send one pipelined request to the server. */
    public void recordSendLatency(String opName, long duration, TimeUnit unit) {
        sendTimer.recordLatency(opName, duration, unit);
    }

    /** Stops the periodic histogram-reset task. */
    public void stop() {
        threadPool.shutdownNow();
    }

    private BasicCounter getOrCreateCounter(String opName) {
        BasicCounter counter = counterMap.get(opName);
        if (counter != null) {
            return counter;
        }
        counter = getNewPipelineCounter(opName);
        // putIfAbsent makes the check-then-act atomic; only the winning thread
        // registers its counter with the servo registry.
        BasicCounter prevCounter = counterMap.putIfAbsent(opName, counter);
        if (prevCounter != null) {
            return prevCounter;
        }
        DefaultMonitorRegistry.getInstance().register(counter);
        return counter;
    }

    private BasicCounter getNewPipelineCounter(String opName) {
        String metricName = "Dyno__" + appName + "__PL__" + opName;
        MonitorConfig config = MonitorConfig.builder(metricName)
                .withTag(new BasicTag("dyno_pl_op", opName))
                .build();
        return new BasicCounter(config);
    }

    /**
     * This class measures the latency of a sync() or syncAndReturnAll() operation, which is the time
     * it takes the client to receive the response of all operations in the pipeline.
     */
    private class PipelineTimer {

        private final EstimatedHistogramMean latMean;
        private final EstimatedHistogramPercentile lat99;
        private final EstimatedHistogramPercentile lat995;
        private final EstimatedHistogramPercentile lat999;
        private final EstimatedHistogram estHistogram;

        private PipelineTimer(String appName) {
            estHistogram = new EstimatedHistogram();
            latMean = new EstimatedHistogramMean("Dyno__" + appName + "__PL__latMean", "PL", "dyno_pl_op", estHistogram);
            lat99 = new EstimatedHistogramPercentile("Dyno__" + appName + "__PL__lat990", "PL", "dyno_pl_op", estHistogram, 0.99);
            lat995 = new EstimatedHistogramPercentile("Dyno__" + appName + "__PL__lat995", "PL", "dyno_pl_op", estHistogram, 0.995);
            lat999 = new EstimatedHistogramPercentile("Dyno__" + appName + "__PL__lat999", "PL", "dyno_pl_op", estHistogram, 0.999);
        }

        public void recordLatency(long duration, TimeUnit unit) {
            // Histogram buckets are in microseconds.
            long durationMicros = TimeUnit.MICROSECONDS.convert(duration, unit);
            estHistogram.add(durationMicros);
        }

        public void reset() {
            Logger.info("resetting histogram");
            // getBuckets(true) returns-and-clears the bucket counts.
            estHistogram.getBuckets(true);
        }
    }

    /**
     * This class measures the time it takes to send a request from the client to the server via the pipeline. The
     * 'send' is not asynchronous within the Jedis client
     */
    private class PipelineSendTimer {

        private final Map<String, EstimatedHistogramMean> histograms = new ConcurrentHashMap<String, EstimatedHistogramMean>();
        private final String appName;

        private PipelineSendTimer(String appName) {
            this.appName = appName;
        }

        public void recordLatency(String opName, long duration, TimeUnit unit) {
            long durationMicros = TimeUnit.MICROSECONDS.convert(duration, unit);
            getOrCreateHistogram(opName).add(durationMicros);
        }

        private EstimatedHistogramMean getOrCreateHistogram(String opName) {
            // Fixed a check-then-act race: the previous containsKey/put pair could
            // let two threads create and double-register histograms for the same
            // op. Use the same putIfAbsent pattern as getOrCreateCounter so only
            // the winning thread registers with the servo registry.
            EstimatedHistogramMean histogramMean = histograms.get(opName);
            if (histogramMean != null) {
                return histogramMean;
            }
            EstimatedHistogram histogram = new EstimatedHistogram();
            histogramMean =
                    new EstimatedHistogramMean("Dyno__" + appName + "__PL__latMean", "PL_SEND", opName, histogram);
            EstimatedHistogramMean prev = histograms.putIfAbsent(opName, histogramMean);
            if (prev != null) {
                return prev;
            }
            DefaultMonitorRegistry.getInstance().register(histogramMean);
            return histogramMean;
        }

        public void reset() {
            Logger.info("resetting all SEND histograms");
            for (EstimatedHistogramMean hm : histograms.values()) {
                hm.reset();
            }
        }
    }
}
| 6,127 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/JedisGenericOperation.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.Operation;
import lombok.Getter;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.util.SafeEncoder;
/**
 * Base class for Dyno Jedis operations: carries the operation name (used for
 * monitoring) and the target key in both String and binary form.
 * Getters for all fields are generated by Lombok's {@code @Getter}.
 */
@Getter
public abstract class JedisGenericOperation<R> implements Operation<Jedis, R> {
    // Operation name reported to the operation monitor.
    private final String name;
    // The key as given, plus its UTF-8 encoding pre-computed once so binary
    // command paths avoid repeated encoding.
    private final String stringKey;
    private final byte[] binaryKey;

    /**
     * @param key the Redis key this operation targets
     * @param opName the operation name reported for metrics
     */
    public JedisGenericOperation(String key, String opName) {
        this.stringKey = key;
        this.binaryKey = SafeEncoder.encode(key);
        this.name = opName;
    }
}
| 6,128 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/JedisConnectionFactory.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.AsyncOperation;
import com.netflix.dyno.connectionpool.Connection;
import com.netflix.dyno.connectionpool.ConnectionContext;
import com.netflix.dyno.connectionpool.ConnectionFactory;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostConnectionPool;
import com.netflix.dyno.connectionpool.ListenableFuture;
import com.netflix.dyno.connectionpool.Operation;
import com.netflix.dyno.connectionpool.OperationMonitor;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.DynoException;
import com.netflix.dyno.connectionpool.exception.FatalConnectionException;
import com.netflix.dyno.connectionpool.impl.ConnectionContextImpl;
import com.netflix.dyno.connectionpool.impl.OperationResultImpl;
import org.apache.commons.lang.NotImplementedException;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisShardInfo;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.util.Sharded;
import javax.net.ssl.SSLParameters;
import javax.net.ssl.SSLSocketFactory;
import java.util.concurrent.TimeUnit;
public class JedisConnectionFactory implements ConnectionFactory<Jedis> {
private static final org.slf4j.Logger Logger = LoggerFactory.getLogger(JedisConnectionFactory.class);
private final OperationMonitor opMonitor;
private final SSLSocketFactory sslSocketFactory;
/**
 * @param monitor records per-operation success/failure metrics
 * @param sslSocketFactory when non-null, connections are made over SSL
 */
public JedisConnectionFactory(OperationMonitor monitor, SSLSocketFactory sslSocketFactory) {
    this.opMonitor = monitor;
    this.sslSocketFactory = sslSocketFactory;
}
/** Creates a connection to the host's Dynomite port. */
@Override
public Connection<Jedis> createConnection(HostConnectionPool<Jedis> pool)
        throws DynoConnectException {
    return new JedisConnection(pool);
}

/** Creates a connection directly to the host's backing datastore (Redis) port. */
@Override
public Connection<Jedis> createConnectionWithDataStore(HostConnectionPool<Jedis> pool)
        throws DynoConnectException {
    return new JedisConnection(pool, true);
}

/** Creates a connection that tags operations with the given consistency level. */
@Override
public Connection<Jedis> createConnectionWithConsistencyLevel(HostConnectionPool<Jedis> pool, String consistency) {
    JedisConnection connection = new JedisConnection(pool);
    connection.setConsistencyLevel(consistency);
    return connection;
}
// TODO: raghu compose redisconnection with jedisconnection in it
public class JedisConnection implements Connection<Jedis> {
private final HostConnectionPool<Jedis> hostPool;
private final Jedis jedisClient;
private final ConnectionContextImpl context = new ConnectionContextImpl();
private String consistencyLevel;
private DynoConnectException lastDynoException;
/** Connects to the host's Dynomite port (not the backing datastore). */
public JedisConnection(HostConnectionPool<Jedis> hostPool) {
    this(hostPool, false);
}
/**
 * @param hostPool pool supplying the target host and timeouts
 * @param connectDataStore when true, connect to the host's backing datastore
 *        (Redis) port instead of the Dynomite port
 */
public JedisConnection(HostConnectionPool<Jedis> hostPool, boolean connectDataStore) {
    this.hostPool = hostPool;
    Host host = hostPool.getHost();
    int port = connectDataStore ? host.getDatastorePort() : host.getPort();
    // Build the shard info once, adding SSL parameters only when a socket
    // factory was supplied to the enclosing factory.
    final JedisShardInfo shardInfo;
    if (sslSocketFactory == null) {
        shardInfo = new JedisShardInfo(host.getHostAddress(), port,
                hostPool.getConnectionTimeout(), hostPool.getSocketTimeout(), Sharded.DEFAULT_WEIGHT);
    } else {
        shardInfo = new JedisShardInfo(host.getHostAddress(), port,
                hostPool.getConnectionTimeout(), hostPool.getSocketTimeout(), Sharded.DEFAULT_WEIGHT,
                true, sslSocketFactory, new SSLParameters(), null);
    }
    shardInfo.setPassword(host.getPassword());
    jedisClient = new Jedis(shardInfo);
}
public void setConsistencyLevel(String consistency) {
this.consistencyLevel = consistency;
}
public boolean isConsistencyLevelProvided() {
return this.consistencyLevel != null;
}
@Override
public <R> OperationResult<R> execute(Operation<Jedis, R> op) throws DynoException {
long startTime = System.nanoTime() / 1000;
String opName = op.getName();
OperationResultImpl<R> opResult = null;
try {
R result = op.execute(jedisClient, context);
if (context.hasMetadata("compression") || context.hasMetadata("decompression")) {
opMonitor.recordSuccess(opName, true);
} else {
opMonitor.recordSuccess(opName);
}
opResult = new OperationResultImpl<R>(opName, result, opMonitor);
opResult.addMetadata("connectionId", String.valueOf(this.hashCode()));
return opResult;
} catch (JedisConnectionException ex) {
Logger.warn("Caught JedisConnectionException: " + ex.getMessage());
opMonitor.recordFailure(opName, ex.getMessage());
lastDynoException = (DynoConnectException) new FatalConnectionException(ex).setAttempt(1).setHost(this.getHost());
throw lastDynoException;
} catch (RuntimeException ex) {
Logger.warn("Caught RuntimeException: " + ex.getMessage());
opMonitor.recordFailure(opName, ex.getMessage());
lastDynoException = (DynoConnectException) new FatalConnectionException(ex).setAttempt(1).setHost(this.getHost());
throw lastDynoException;
} finally {
long duration = System.nanoTime() / 1000 - startTime;
if (opResult != null) {
opResult.setLatency(duration, TimeUnit.MICROSECONDS);
}
}
}
@Override
public <R> ListenableFuture<OperationResult<R>> executeAsync(AsyncOperation<Jedis, R> op) throws DynoException {
throw new NotImplementedException();
}
@Override
public void close() {
jedisClient.quit();
jedisClient.disconnect();
}
@Override
public Host getHost() {
return hostPool.getHost();
}
@Override
public void open() throws DynoException {
jedisClient.connect();
if (isConsistencyLevelProvided()) {
jedisClient.getClient().sendCommand(DynoConfigCommand.CONN_CONSISTENCY, this.consistencyLevel);
jedisClient.getClient().getStatusCodeReply();
}
}
@Override
public DynoConnectException getLastException() {
return lastDynoException;
}
@Override
public HostConnectionPool<Jedis> getParentConnectionPool() {
return hostPool;
}
@Override
public void execPing() {
final String result;
try {
result = jedisClient.ping();
} catch (JedisConnectionException e) {
throw new DynoConnectException("Unsuccessful ping", e);
}
if (result == null || result.isEmpty()) {
throw new DynoConnectException("Unsuccessful ping, got empty result");
}
}
@Override
public ConnectionContext getContext() {
return context;
}
public Jedis getClient() {
return jedisClient;
}
}
}
| 6,129 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/OpName.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.jedis;
/**
 * Names of the Redis/Dynomite operations instrumented by the Dyno Jedis client.
 * Constants are grouped by command family; the declaration order (and therefore
 * each constant's ordinal) is unchanged from the original definition.
 */
public enum OpName {
    APPEND,
    // bit and blocking-list commands
    BITCOUNT, BLPOP, BRPOP,
    // counters and key lifecycle
    DECR, DECRBY, DEL, DUMP,
    ECHO, EXISTS, EXPIRE, EXPIREAT, EVAL, EVALSHA,
    // expiring-hash (EH*) extension commands
    EHSET, EHGET, EHDEL, EHEXISTS, EHGETALL, EHKEYS, EHMGET, EHVALS,
    EHMSET, EHSETNX, EHSCAN, EHINCRBY, EHINCRBYFLOAT,
    // plain string reads
    GET, GETBIT, GETRANGE, GETSET,
    // hash commands
    HDEL, HEXISTS, HGET, HGETALL, HINCRBY, HINCRBYFLOAT, HKEYS, HLEN, HMGET,
    HMSET, HSET, HSCAN, HSETNX, HVALS, HSTRLEN,
    // numeric increments
    INCR, INCRBY, INCRBYFLOAT,
    KEYS,
    // list commands
    LINDEX, LINSERT, LLEN, LPOP, LPUSH, LPUSHX, LRANGE, LREM, LSET, LTRIM,
    // multi-key and database commands
    MOVE, MGET, MSET, MSETNX,
    // TTL management
    PERSIST, PEXPIRE, PEXPIREAT, PSETEX, PTTL,
    // rename/restore and list-tail commands
    RENAME, RENAMENX, RESTORE, RPOP, RPOPLPUSH, RPUSH, RPUSHX,
    // set, scripting and string-write commands
    SADD, SCAN, SCARD, SCRIPT_EXISTS, SCRIPT_FLUSH, SCRIPT_LOAD, SCRIPT_KILL,
    SDIFF, SDIFFSTORE, SET, SETBIT, SETEX, SETNX, SETRANGE, SINTER, SINTERSTORE,
    SISMEMBER, SMEMBERS, SMOVE, SORT, SPOP, SRANDMEMBER, SREM, SSCAN, STRLEN,
    SUBSTR, SUNION, SUNIONSTORE,
    TTL, TYPE,
    UNLINK,
    // sorted-set commands
    ZADD, ZCARD, ZCOUNT, ZINCRBY, ZLEXCOUNT, ZRANGE, ZRANGEBYLEX,
    ZRANGEWITHSCORES, ZRANK, ZRANGEBYSCORE, ZRANGEBYSCOREWITHSCORES, ZREM,
    ZREMRANGEBYLEX, ZREMRANGEBYRANK, ZREMRANGEBYSCORE, ZREVRANGE, ZREVRANGEBYLEX,
    ZREVRANGEBYSCORE, ZREVRANGEBYSCOREWITHSCORES, ZREVRANGEWITHSCORES, ZREVRANK,
    ZSCAN, ZSCORE
}
| 6,130 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoDualWriterPipeline.java | /*******************************************************************************
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.dyno.jedis;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
 * Dual writer for pipeline commands. This dual writer applies mutations to both
 * the primary and shadow Dyno clusters, but the response returned is only from the
 * primary dynomite cluster. Non-mutation operations are targeted only at the primary
 * dynomite cluster.
 */
public class DynoDualWriterPipeline extends DynoJedisPipeline {
private static final org.slf4j.Logger logger = LoggerFactory.getLogger(DynoDualWriterPipeline.class);
// Shared single-threaded executor: shadow-cluster work from every
// DynoDualWriterPipeline instance is serialized onto this one background thread.
private static ExecutorService executor = Executors.newSingleThreadExecutor();
// Primary cluster pool; also consulted for the dual-write enabled/idle checks.
private final ConnectionPoolImpl<Jedis> connPool;
// Pipeline bound to the shadow cluster; written to asynchronously, fire-and-forget.
private final DynoJedisPipeline shadowPipeline;
// Decides, per key, whether a write should also be sent to the shadow cluster.
private final DynoDualWriterClient.Dial dial;
/**
 * Builds a dual-writing pipeline: commands run against the primary pool while
 * mutations are also replayed asynchronously on a pipeline bound to the shadow pool.
 *
 * @param appName              application name, used to register the shadow pipeline monitor
 * @param pool                 primary connection pool; its responses are returned to callers
 * @param operationMonitor     monitor for the primary pipeline
 * @param connPoolMonitor      monitor for the primary connection pool
 * @param shadowConnectionPool pool for the shadow (dual-write target) cluster
 * @param dial                 decides which keys are in range for dual writes
 */
DynoDualWriterPipeline(String appName,
ConnectionPoolImpl<Jedis> pool,
DynoJedisPipelineMonitor operationMonitor,
ConnectionPoolMonitor connPoolMonitor,
ConnectionPoolImpl<Jedis> shadowConnectionPool,
DynoDualWriterClient.Dial dial) {
super(pool, operationMonitor, connPoolMonitor);
this.connPool = pool;
this.dial = dial;
// use a new pipeline monitor for shadow cluster
int flushTimerFrequency = shadowConnectionPool.getConfiguration().getTimingCountersResetFrequencySeconds();
DynoJedisPipelineMonitor shadowOperationMonitor = new DynoJedisPipelineMonitor(appName, flushTimerFrequency);
this.shadowPipeline = new DynoJedisPipeline(shadowConnectionPool, shadowOperationMonitor,
shadowConnectionPool.getMonitor());
}
// Accessor for the primary pool; used by the shadow-write gating checks.
private ConnectionPoolImpl<Jedis> getConnPool() {
return this.connPool;
}
/*
 * For async scheduling of Jedis commands on shadow clusters.
 * Returns null when the dial/pool checks reject the key; callers treat shadow
 * writes as fire-and-forget and ignore the returned Future.
 */
private <R> Future<Response<R>> writeAsync(final String key, Callable<Response<R>> func) {
if (canSendShadowRequest(key)) {
return executor.submit(func);
}
return null;
}
/*
 * For async scheduling of Jedis binary commands on shadow clusters.
 * Same contract as the String-key overload above.
 */
private <R> Future<Response<R>> writeAsync(final byte[] key, Callable<Response<R>> func) {
if (canSendShadowRequest(key)) {
return executor.submit(func);
}
return null;
}
/*
 * Asynchronous processing of non Jedis commands. No dial check as these operations are preceded
 * by Jedis pipeline commands that perform dial check on shadow cluster.
 */
private <R> Future<R> scheduleAsync(Callable<R> func) {
return executor.submit(func);
}
/**
 * Returns true if the connection pool
 * <li>Is NOT idle</li>
 * <li>Has active pools (the shadow cluster may disappear at any time and we don't want to bloat logs)</li>
 * <li>The key is in range in the dial</li>
 * <p>
 * The idle check is necessary since there may be active host pools however the shadow client may not be able to
 * connect to them, for example, if security groups are not configured properly.
 */
private boolean canSendShadowRequest(String key) {
    return isShadowClusterReachable() && dial.isInRange(key);
}

/** Binary-key variant of {@link #canSendShadowRequest(String)}. */
private boolean canSendShadowRequest(byte[] key) {
    return isShadowClusterReachable() && dial.isInRange(key);
}

/**
 * Key-independent part of the shadow-write gate, shared by both
 * {@code canSendShadowRequest} overloads: dual writes must be enabled, the pool
 * must not be idle, and at least one host pool must be active.
 */
private boolean isShadowClusterReachable() {
    return this.getConnPool().getConfiguration().isDualWriteEnabled() &&
            !this.getConnPool().isIdle() &&
            this.getConnPool().getActivePools().size() > 0;
}
/**
 * Sync operation will wait for primary cluster and the result is returned. But,
 * on a shadow cluster the operation is asynchronous and the result is not
 * returned to the client.
 */
@Override
public void sync() {
// Flush the shadow pipeline on the background thread; any failure there
// never affects the primary response.
scheduleAsync(() -> {
shadowPipeline.sync();
return null;
});
super.sync();
}
/** Same as {@link #sync()}, but returns the primary cluster's responses. */
@Override
public List<Object> syncAndReturnAll() {
scheduleAsync(() -> {
this.shadowPipeline.sync();
return null;
});
return super.syncAndReturnAll();
}
/** Discards both pipelines; the shadow discard happens asynchronously. */
@Override
public void discardPipelineAndReleaseConnection() {
scheduleAsync(() -> {
this.shadowPipeline.discardPipelineAndReleaseConnection();
return null;
});
super.discardPipelineAndReleaseConnection();
}
@Override
public void close() throws Exception {
this.shadowPipeline.close(); // close the shadow pipeline synchronously
super.close();
}
//-------------------------- JEDIS PIPELINE COMMANDS ----------------------------
// Pattern for every supported mutation below: first replay the identical command
// on the shadow pipeline asynchronously (fire-and-forget; the returned Future is
// ignored), then execute it on the primary pipeline and return the primary's
// Response. Commands that throw UnsupportedOperationException are not dual-written.
@Override
public Response<Long> append(final String key, final String value) {
writeAsync(key, () -> shadowPipeline.append(key, value));
return DynoDualWriterPipeline.super.append(key, value);
}
@Override
public Response<List<String>> blpop(final String arg) {
writeAsync(arg, () -> shadowPipeline.blpop(arg));
return DynoDualWriterPipeline.super.blpop(arg);
}
@Override
public Response<List<String>> brpop(final String arg) {
writeAsync(arg, () -> shadowPipeline.brpop(arg));
return DynoDualWriterPipeline.super.brpop(arg);
}
@Override
public Response<Long> decr(final String key) {
writeAsync(key, () -> shadowPipeline.decr(key));
return DynoDualWriterPipeline.super.decr(key);
}
@Override
public Response<Long> decrBy(final String key, final long integer) {
writeAsync(key, () -> shadowPipeline.decrBy(key, integer));
return DynoDualWriterPipeline.super.decrBy(key, integer);
}
@Override
public Response<Long> del(final String key) {
writeAsync(key, () -> shadowPipeline.del(key));
return DynoDualWriterPipeline.super.del(key);
}
@Override
public Response<Long> expire(final String key, final int seconds) {
writeAsync(key, () -> shadowPipeline.expire(key, seconds));
return DynoDualWriterPipeline.super.expire(key, seconds);
}
@Override
public Response<Long> pexpire(String key, long milliseconds) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> expireAt(final String key, final long unixTime) {
writeAsync(key, () -> shadowPipeline.expireAt(key, unixTime));
return DynoDualWriterPipeline.super.expireAt(key, unixTime);
}
@Override
public Response<Long> pexpireAt(String key, long millisecondsTimestamp) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hdel(final String key, final String... field) {
writeAsync(key, () -> shadowPipeline.hdel(key, field));
return DynoDualWriterPipeline.super.hdel(key, field);
}
@Override
public Response<Long> hincrBy(final String key, final String field, final long value) {
writeAsync(key, () -> shadowPipeline.hincrBy(key, field, value));
return DynoDualWriterPipeline.super.hincrBy(key, field, value);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<Double> hincrByFloat(final String key, final String field, final double value) {
writeAsync(key, () -> shadowPipeline.hincrByFloat(key, field, value));
return DynoDualWriterPipeline.super.hincrByFloat(key, field, value);
}
/**
 * This method is a BinaryRedisPipeline command which dyno does not yet properly
 * support, therefore the interface is not yet implemented since only a few
 * binary commands are present.
 */
public Response<String> hmset(final byte[] key, final Map<byte[], byte[]> hash) {
writeAsync(key, () -> shadowPipeline.hmset(key, hash));
return DynoDualWriterPipeline.super.hmset(key, hash);
}
@Override
public Response<String> hmset(final String key, final Map<String, String> hash) {
writeAsync(key, () -> shadowPipeline.hmset(key, hash));
return DynoDualWriterPipeline.super.hmset(key, hash);
}
@Override
public Response<Long> hset(final String key, final String field, final String value) {
writeAsync(key, () -> shadowPipeline.hset(key, field, value));
return DynoDualWriterPipeline.super.hset(key, field, value);
}
/**
 * This method is a BinaryRedisPipeline command which dyno does not yet properly
 * support, therefore the interface is not yet implemented.
 */
public Response<Long> hset(final byte[] key, final byte[] field, final byte[] value) {
writeAsync(key, () -> shadowPipeline.hset(key, field, value));
return DynoDualWriterPipeline.super.hset(key, field, value);
}
@Override
public Response<Long> hsetnx(final String key, final String field, final String value) {
writeAsync(key, () -> shadowPipeline.hsetnx(key, field, value));
return DynoDualWriterPipeline.super.hsetnx(key, field, value);
}
@Override
public Response<Long> incr(final String key) {
writeAsync(key, () -> shadowPipeline.incr(key));
return DynoDualWriterPipeline.super.incr(key);
}
@Override
public Response<Long> incrBy(final String key, final long integer) {
writeAsync(key, () -> shadowPipeline.incrBy(key, integer));
return DynoDualWriterPipeline.super.incrBy(key, integer);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<Double> incrByFloat(final String key, final double increment) {
writeAsync(key, () -> shadowPipeline.incrByFloat(key, increment));
return DynoDualWriterPipeline.super.incrByFloat(key, increment);
}
@Override
public Response<Long> linsert(final String key, final ListPosition where, final String pivot, final String value) {
writeAsync(key, () -> shadowPipeline.linsert(key, where, pivot, value));
return DynoDualWriterPipeline.super.linsert(key, where, pivot, value);
}
@Override
public Response<String> lpop(final String key) {
writeAsync(key, () -> shadowPipeline.lpop(key));
return DynoDualWriterPipeline.super.lpop(key);
}
@Override
public Response<Long> lpush(final String key, final String... string) {
writeAsync(key, () -> shadowPipeline.lpush(key, string));
return DynoDualWriterPipeline.super.lpush(key, string);
}
@Override
public Response<Long> lpushx(final String key, final String... string) {
writeAsync(key, () -> shadowPipeline.lpushx(key, string));
return DynoDualWriterPipeline.super.lpushx(key, string);
}
@Override
public Response<Long> lrem(final String key, final long count, final String value) {
writeAsync(key, () -> shadowPipeline.lrem(key, count, value));
return DynoDualWriterPipeline.super.lrem(key, count, value);
}
@Override
public Response<String> lset(final String key, final long index, final String value) {
writeAsync(key, () -> shadowPipeline.lset(key, index, value));
return DynoDualWriterPipeline.super.lset(key, index, value);
}
@Override
public Response<String> ltrim(final String key, final long start, final long end) {
writeAsync(key, () -> shadowPipeline.ltrim(key, start, end));
return DynoDualWriterPipeline.super.ltrim(key, start, end);
}
@Override
public Response<Long> move(final String key, final int dbIndex) {
writeAsync(key, () -> shadowPipeline.move(key, dbIndex));
return DynoDualWriterPipeline.super.move(key, dbIndex);
}
@Override
public Response<Long> persist(final String key) {
writeAsync(key, () -> shadowPipeline.persist(key));
return DynoDualWriterPipeline.super.persist(key);
}
/* not supported by RedisPipeline 2.7.3 */
// NOTE: the dial check for rename/renamenx is done on the NEW key, not the old one.
public Response<String> rename(final String oldkey, final String newkey) {
writeAsync(newkey, () -> shadowPipeline.rename(oldkey, newkey));
return DynoDualWriterPipeline.super.rename(oldkey, newkey);
}
/* not supported by RedisPipeline 2.7.3 */
public Response<Long> renamenx(final String oldkey, final String newkey) {
writeAsync(newkey, () -> shadowPipeline.renamenx(oldkey, newkey));
return DynoDualWriterPipeline.super.renamenx(oldkey, newkey);
}
@Override
public Response<String> rpop(final String key) {
writeAsync(key, () -> shadowPipeline.rpop(key));
return DynoDualWriterPipeline.super.rpop(key);
}
@Override
public Response<Long> rpush(final String key, final String... string) {
writeAsync(key, () -> shadowPipeline.rpush(key, string));
return DynoDualWriterPipeline.super.rpush(key, string);
}
@Override
public Response<Long> rpushx(final String key, final String... string) {
writeAsync(key, () -> shadowPipeline.rpushx(key, string));
return DynoDualWriterPipeline.super.rpushx(key, string);
}
@Override
public Response<Long> sadd(final String key, final String... member) {
writeAsync(key, () -> shadowPipeline.sadd(key, member));
return DynoDualWriterPipeline.super.sadd(key, member);
}
@Override
public Response<String> set(final String key, final String value) {
writeAsync(key, () -> shadowPipeline.set(key, value));
return DynoDualWriterPipeline.super.set(key, value);
}
@Override
public Response<Boolean> setbit(final String key, final long offset, final boolean value) {
writeAsync(key, () -> shadowPipeline.setbit(key, offset, value));
return DynoDualWriterPipeline.super.setbit(key, offset, value);
}
@Override
public Response<String> setex(final String key, final int seconds, final String value) {
writeAsync(key, () -> shadowPipeline.setex(key, seconds, value));
return DynoDualWriterPipeline.super.setex(key, seconds, value);
}
@Override
public Response<Long> setnx(final String key, final String value) {
writeAsync(key, () -> shadowPipeline.setnx(key, value));
return DynoDualWriterPipeline.super.setnx(key, value);
}
@Override
public Response<Long> setrange(final String key, final long offset, final String value) {
writeAsync(key, () -> shadowPipeline.setrange(key, offset, value));
return DynoDualWriterPipeline.super.setrange(key, offset, value);
}
// sort/spop are dual-written even though they are reads-with-side-effects on the
// primary; SORT without STORE does not mutate, but the shadow replay keeps the
// two clusters' command streams identical.
@Override
public Response<List<String>> sort(final String key) {
writeAsync(key, () -> shadowPipeline.sort(key));
return DynoDualWriterPipeline.super.sort(key);
}
@Override
public Response<List<String>> sort(final String key, final SortingParams sortingParameters) {
writeAsync(key, () -> shadowPipeline.sort(key, sortingParameters));
return DynoDualWriterPipeline.super.sort(key, sortingParameters);
}
@Override
public Response<String> spop(final String key) {
writeAsync(key, () -> shadowPipeline.spop(key));
return DynoDualWriterPipeline.super.spop(key);
}
@Override
public Response<Set<String>> spop(final String key, final long count) {
writeAsync(key, () -> shadowPipeline.spop(key, count));
return DynoDualWriterPipeline.super.spop(key, count);
}
@Override
public Response<Long> srem(final String key, final String... member) {
writeAsync(key, () -> shadowPipeline.srem(key, member));
return DynoDualWriterPipeline.super.srem(key, member);
}
/**
 * This method is not supported by the BinaryRedisPipeline interface.
 */
public Response<ScanResult<String>> sscan(final String key, final int cursor) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline");
}
/**
 * This method is not supported by the BinaryRedisPipeline interface.
 */
public Response<ScanResult<String>> sscan(final String key, final String cursor) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline");
}
// Sorted-set mutations follow the same dual-write pattern: async shadow replay,
// then primary execution whose Response is returned.
@Override
public Response<Long> zadd(final String key, final double score, final String member) {
writeAsync(key, () -> shadowPipeline.zadd(key, score, member));
return DynoDualWriterPipeline.super.zadd(key, score, member);
}
@Override
public Response<Long> zadd(final String key, final Map<String, Double> scoreMembers) {
writeAsync(key, () -> shadowPipeline.zadd(key, scoreMembers));
return DynoDualWriterPipeline.super.zadd(key, scoreMembers);
}
@Override
public Response<Double> zincrby(final String key, final double score, final String member) {
writeAsync(key, () -> shadowPipeline.zincrby(key, score, member));
return DynoDualWriterPipeline.super.zincrby(key, score, member);
}
@Override
public Response<Long> zrem(final String key, final String... member) {
writeAsync(key, () -> shadowPipeline.zrem(key, member));
return DynoDualWriterPipeline.super.zrem(key, member);
}
@Override
public Response<Long> zremrangeByRank(final String key, final long start, final long end) {
writeAsync(key, () -> shadowPipeline.zremrangeByRank(key, start, end));
return DynoDualWriterPipeline.super.zremrangeByRank(key, start, end);
}
@Override
public Response<Long> zremrangeByScore(final String key, final double start, final double end) {
writeAsync(key, () -> shadowPipeline.zremrangeByScore(key, start, end));
return DynoDualWriterPipeline.super.zremrangeByScore(key, start, end);
}
/**
 * This method is not supported by the BinaryRedisPipeline interface.
 */
public Response<ScanResult<Tuple>> zscan(final String key, final int cursor) {
throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline");
}
@Override
public Response<Long> zlexcount(String key, String min, String max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrangeByLex(String key, String min, String max) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrangeByLex(String key, String min, String max, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zremrangeByLex(String key, String start, String end) {
throw new UnsupportedOperationException("not yet implemented");
}
/**** Binary Operations ****/
@Override
public Response<String> set(final byte[] key, final byte[] value) {
writeAsync(key, () -> shadowPipeline.set(key, value));
return DynoDualWriterPipeline.super.set(key, value);
}
@Override
public Response<Long> pfadd(String key, String... elements) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pfcount(String key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<Long>> bitfield(String key, String... arguments) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrevrangeByLex(String key, String max, String min) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<String>> zrevrangeByLex(String key, String max, String min, int offset, int count) {
throw new UnsupportedOperationException("not yet implemented");
}
// Geo commands are entirely unimplemented for dual writes.
@Override
public Response<Long> geoadd(String arg0, Map<String, GeoCoordinate> arg1) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> geoadd(String arg0, double arg1, double arg2, String arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> geodist(String arg0, String arg1, String arg2) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Double> geodist(String arg0, String arg1, String arg2, GeoUnit arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<String>> geohash(String arg0, String... arg1) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoCoordinate>> geopos(String arg0, String... arg1) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadius(String arg0, double arg1, double arg2, double arg3,
GeoUnit arg4) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadius(String arg0, double arg1, double arg2, double arg3, GeoUnit arg4,
GeoRadiusParam arg5) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMember(String arg0, String arg1, double arg2, GeoUnit arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<GeoRadiusResponse>> georadiusByMember(String arg0, String arg1, double arg2, GeoUnit arg3,
GeoRadiusParam arg4) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> zadd(String arg0, Map<String, Double> arg1, ZAddParams arg2) {
throw new UnsupportedOperationException("not yet implemented");
}
// ZADD with params IS dual-written, unlike the Map+ZAddParams overload above.
public Response<Long> zadd(final String key, final double score, final String member, final ZAddParams params) {
writeAsync(key, () -> shadowPipeline.zadd(key, score, member, params));
return DynoDualWriterPipeline.super.zadd(key, score, member, params);
}
@Override
public Response<Double> zincrby(String arg0, double arg1, String arg2, ZIncrByParams arg3) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> append(byte[] key, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> blpop(byte[] arg) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> brpop(byte[] arg) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> decr(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> decrBy(final byte[] key, final long integer) {
writeAsync(key, () -> shadowPipeline.decrBy(key, integer));
return DynoDualWriterPipeline.super.decrBy(key, integer);
}
@Override
public Response<Long> del(final byte[] key) {
writeAsync(key, () -> shadowPipeline.del(key));
return DynoDualWriterPipeline.super.del(key);
}
@Override
public Response<byte[]> echo(byte[] string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> expire(byte[] key, int seconds) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pexpire(byte[] key, long milliseconds) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> expireAt(byte[] key, long unixTime) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> pexpireAt(byte[] key, long millisecondsTimestamp) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Boolean> getbit(byte[] key, long offset) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> getSet(final byte[] key, final byte[] value) {
writeAsync(key, () -> shadowPipeline.getSet(key, value));
return DynoDualWriterPipeline.super.getSet(key, value);
}
@Override
public Response<byte[]> getrange(byte[] key, long startOffset, long endOffset) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hdel(byte[] key, byte[]... field) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Boolean> hexists(byte[] key, byte[] field) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hincrBy(byte[] key, byte[] field, long value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Set<byte[]>> hkeys(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hlen(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> hsetnx(byte[] key, byte[] field, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> hvals(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> incr(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> incrBy(byte[] key, long integer) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> lindex(byte[] key, long index) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> linsert(byte[] key, ListPosition where, byte[] pivot, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> llen(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> lpop(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> lpush(byte[] key, byte[]... string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> lpushx(byte[] key, byte[]... bytes) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<List<byte[]>> lrange(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> lrem(byte[] key, long count, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> lset(byte[] key, long index, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<String> ltrim(byte[] key, long start, long end) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> move(byte[] key, int dbIndex) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> persist(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<byte[]> rpop(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> rpush(byte[] key, byte[]... string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> rpushx(byte[] key, byte[]... string) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> sadd(byte[] key, byte[]... member) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> scard(byte[] key) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Boolean> setbit(byte[] key, long offset, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
@Override
public Response<Long> setrange(byte[] key, long offset, byte[] value) {
throw new UnsupportedOperationException("not yet implemented");
}
    @Override
    public Response<String> setex(final byte[] key, final int seconds, final byte[] value) {
        // Mirror the write to the shadow pipeline via writeAsync, then delegate to
        // the primary implementation through the interface's default method.
        // NOTE(review): writeAsync's delivery guarantees are defined elsewhere —
        // presumably best-effort; the returned Response comes from the primary only.
        writeAsync(key, () -> shadowPipeline.setex(key, seconds, value));
        return DynoDualWriterPipeline.super.setex(key, seconds, value);
    }
    // Remaining binary-key string/set/zset/hyperloglog/geo commands: declared for
    // interface completeness, not yet implemented by this pipeline.
    @Override
    public Response<Long> setnx(byte[] key, byte[] value) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> smembers(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Boolean> sismember(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<byte[]>> sort(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<byte[]>> sort(byte[] key, SortingParams sortingParameters) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<byte[]> spop(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> spop(byte[] key, long count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<byte[]> srandmember(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> srem(byte[] key, byte[]... member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> strlen(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<String> substr(byte[] key, int start, int end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> ttl(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<String> type(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zadd(byte[] key, double score, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zadd(byte[] key, double score, byte[] member, ZAddParams params) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zadd(byte[] key, Map<byte[], Double> scoreMembers) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zadd(byte[] key, Map<byte[], Double> scoreMembers, ZAddParams params) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zcard(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zcount(byte[] key, double min, double max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Double> zincrby(byte[] key, double score, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Double> zincrby(byte[] key, double score, byte[] member, ZIncrByParams params) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrange(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrangeByScore(byte[] key, double min, double max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrangeByScore(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrangeByScore(byte[] key, double min, double max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrangeByScore(byte[] key, byte[] min, byte[] max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, double min, double max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, double min, double max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrangeByScoreWithScores(byte[] key, byte[] min, byte[] max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrangeByScore(byte[] key, double max, double min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrangeByScore(byte[] key, byte[] max, byte[] min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrangeByScore(byte[] key, double max, double min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrangeByScore(byte[] key, byte[] max, byte[] min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, double max, double min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, byte[] max, byte[] min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, double max, double min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrevrangeByScoreWithScores(byte[] key, byte[] max, byte[] min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrangeWithScores(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zrank(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zrem(byte[] key, byte[]... member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zremrangeByRank(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zremrangeByScore(byte[] key, double start, double end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zremrangeByScore(byte[] key, byte[] start, byte[] end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrange(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<Tuple>> zrevrangeWithScores(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zrevrank(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Double> zscore(byte[] key, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zlexcount(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrangeByLex(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrangeByLex(byte[] key, byte[] min, byte[] max, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrangeByLex(byte[] key, byte[] max, byte[] min) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Set<byte[]>> zrevrangeByLex(byte[] key, byte[] max, byte[] min, int offset, int count) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> zremrangeByLex(byte[] key, byte[] min, byte[] max) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> bitcount(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> bitcount(byte[] key, long start, long end) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> pfadd(byte[] key, byte[]... elements) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> pfcount(byte[] key) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> geoadd(byte[] key, double longitude, double latitude, byte[] member) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Long> geoadd(byte[] key, Map<byte[], GeoCoordinate> memberCoordinateMap) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Double> geodist(byte[] key, byte[] member1, byte[] member2) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<Double> geodist(byte[] key, byte[] member1, byte[] member2, GeoUnit unit) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<byte[]>> geohash(byte[] key, byte[]... members) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<GeoCoordinate>> geopos(byte[] key, byte[]... members) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<GeoRadiusResponse>> georadius(byte[] key, double longitude, double latitude, double radius,
                                                       GeoUnit unit) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<GeoRadiusResponse>> georadius(byte[] key, double longitude, double latitude, double radius,
                                                       GeoUnit unit, GeoRadiusParam param) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<GeoRadiusResponse>> georadiusByMember(byte[] key, byte[] member, double radius, GeoUnit unit) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<GeoRadiusResponse>> georadiusByMember(byte[] key, byte[] member, double radius, GeoUnit unit,
                                                               GeoRadiusParam param) {
        throw new UnsupportedOperationException("not yet implemented");
    }
    @Override
    public Response<List<Long>> bitfield(byte[] key, byte[]... elements) {
        throw new UnsupportedOperationException("not yet implemented");
    }
}
| 6,131 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/DynoJedisUtils.java | package com.netflix.dyno.jedis;
import com.netflix.discovery.EurekaClient;
import com.netflix.dyno.connectionpool.ConnectionFactory;
import com.netflix.dyno.connectionpool.ConnectionPool;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration;
import com.netflix.dyno.connectionpool.ConnectionPoolMonitor;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.exception.DynoConnectException;
import com.netflix.dyno.connectionpool.exception.NoAvailableHostsException;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolImpl;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.connectionpool.impl.lb.HttpEndpointBasedTokenMapSupplier;
import com.netflix.dyno.contrib.DynoOPMonitor;
import com.netflix.dyno.contrib.EurekaHostsSupplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import javax.inject.Singleton;
import javax.net.ssl.SSLSocketFactory;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Stream;
@Singleton
public class DynoJedisUtils {

    // NOTE(review): previously registered under DynoJedisClient.class (apparent
    // copy/paste), which mis-attributed this class's log lines.
    private static final Logger logger = LoggerFactory.getLogger(DynoJedisUtils.class);

    /**
     * Populates {@code cpConfig} with host and token suppliers and derives the
     * load-balancing and hashtag settings from the resulting topology.
     *
     * @param cpConfig         connection pool configuration, updated in place
     * @param hostSupplier     supplier of Dynomite hosts; when null an
     *                         {@link EurekaHostsSupplier} is built from {@code discoveryClient}
     * @param tokenMapSupplier optional token topology supplier
     * @param discoveryClient  Eureka client, required only when {@code hostSupplier} is null
     * @param clusterName      Dynomite cluster name used for the Eureka lookup
     * @throws DynoConnectException when neither a host supplier nor a discovery client is given
     */
    public static void updateConnectionPoolConfig(ConnectionPoolConfigurationImpl cpConfig,
                                                  HostSupplier hostSupplier, TokenMapSupplier tokenMapSupplier,
                                                  EurekaClient discoveryClient, String clusterName) {
        if (hostSupplier == null) {
            if (discoveryClient == null) {
                throw new DynoConnectException("HostSupplier not provided. Cannot initialize EurekaHostsSupplier "
                        + "which requires a DiscoveryClient");
            } else {
                hostSupplier = new EurekaHostsSupplier(clusterName, discoveryClient);
            }
        }
        cpConfig.withHostSupplier(hostSupplier);

        if (tokenMapSupplier != null) {
            cpConfig.withTokenSupplier(tokenMapSupplier);
        }

        setLoadBalancingStrategy(cpConfig);
        setHashtagConnectionPool(hostSupplier, cpConfig);
    }

    /**
     * Creates and starts a Jedis connection pool for {@code appName}.
     *
     * @param sslSocketFactory used for TLS connections; may be null for plaintext
     * @return the started (or idling) connection pool
     */
    public static ConnectionPool<Jedis> createConnectionPool(String appName, DynoOPMonitor opMonitor,
                                                             ConnectionPoolMonitor cpMonitor, ConnectionPoolConfiguration cpConfig,
                                                             SSLSocketFactory sslSocketFactory) {
        JedisConnectionFactory connFactory = new JedisConnectionFactory(opMonitor, sslSocketFactory);
        return startConnectionPool(appName, connFactory, cpConfig, cpMonitor);
    }

    /**
     * Starts the pool, registering a JVM shutdown hook for cleanup. When no hosts
     * are available at startup, either fails (per configuration) or idles the pool.
     */
    private static ConnectionPool<Jedis> startConnectionPool(String appName, ConnectionFactory connFactory,
                                                             ConnectionPoolConfiguration cpConfig, ConnectionPoolMonitor cpMonitor) {
        final ConnectionPool<Jedis> pool = new ConnectionPoolImpl<>(connFactory, cpConfig, cpMonitor);

        try {
            logger.info("Starting connection pool for app " + appName);

            pool.start().get();
            // Best-effort pool shutdown when the JVM exits.
            Runtime.getRuntime().addShutdownHook(new Thread(pool::shutdown));
        } catch (NoAvailableHostsException e) {
            if (cpConfig.getFailOnStartupIfNoHosts()) {
                throw new RuntimeException(e);
            }
            logger.warn("UNABLE TO START CONNECTION POOL -- IDLING");
            pool.idle();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        return pool;
    }

    /**
     * For token-aware load balancing, ensures a token supplier exists (falling back
     * to {@link HttpEndpointBasedTokenMapSupplier}) and disables local-zone affinity
     * when the local rack cannot be determined.
     */
    private static void setLoadBalancingStrategy(ConnectionPoolConfigurationImpl config) {
        if (ConnectionPoolConfiguration.LoadBalancingStrategy.TokenAware == config.getLoadBalancingStrategy()) {
            if (config.getTokenSupplier() == null) {
                logger.warn(
                        "TOKEN AWARE selected and no token supplier found, using default HttpEndpointBasedTokenMapSupplier()");
                config.withTokenSupplier(new HttpEndpointBasedTokenMapSupplier());
            }

            if (config.getLocalRack() == null && config.localZoneAffinity()) {
                String warningMessage = "DynoJedisClient for app=[" + config.getName()
                        + "] is configured for local rack affinity "
                        + "but cannot determine the local rack! DISABLING rack affinity for this instance. "
                        + "To make the client aware of the local rack either use "
                        + "ConnectionPoolConfigurationImpl.setLocalRack() when constructing the client "
                        + "instance or ensure EC2_AVAILABILTY_ZONE is set as an environment variable, e.g. "
                        + "run with -DLOCAL_RACK=us-east-1c";
                config.setLocalZoneAffinity(false);
                logger.warn(warningMessage);
            }
        }
    }

    /**
     * Set the hash to the connection pool if is provided by Dynomite.
     * All hosts in the token topology must agree on the hashtag (including the
     * all-null case); a mismatch is a fatal configuration error.
     *
     * @param hostSupplier source of the Dynomite hosts
     * @param config       configuration to receive the hashtag
     * @throws DynoConnectException when no token supplier is configured or it returns no hosts
     */
    private static void setHashtagConnectionPool(HostSupplier hostSupplier, ConnectionPoolConfigurationImpl config) {
        // Find the hosts from host supplier.
        List<Host> hosts = (List<Host>) hostSupplier.getHosts();
        Collections.sort(hosts);

        // Take the token map supplier (aka the token topology from Dynomite).
        TokenMapSupplier tokenMapSupplier = config.getTokenSupplier();
        if (tokenMapSupplier == null) {
            throw new DynoConnectException("TokenMapSupplier not provided");
        }

        List<HostToken> hostTokens = tokenMapSupplier.getTokens(new HashSet<Host>(hosts));
        /* Dyno cannot reach the TokenMapSupplier endpoint,
         * therefore no nodes can be retrieved.
         */
        if (hostTokens.isEmpty()) {
            throw new DynoConnectException("No hosts in the TokenMapSupplier");
        }

        String hashtag = hostTokens.get(0).getHost().getHashtag();
        // Single null-safe consistency check replaces the previous duplicated
        // null / non-null branches: every host must report the same hashtag.
        boolean mismatch = hostTokens.stream()
                .map(hostToken -> hostToken.getHost().getHashtag())
                .anyMatch(ht -> hashtag == null ? ht != null : !hashtag.equals(ht));
        if (mismatch) {
            logger.error("Hashtag mismatch across hosts");
            throw new RuntimeException("Hashtags are different across hosts");
        }

        if (hashtag != null) {
            config.withHashtag(hashtag);
        }
    }
}
| 6,132 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/operation/BaseKeyOperation.java | package com.netflix.dyno.jedis.operation;
import com.netflix.dyno.connectionpool.Operation;
import com.netflix.dyno.jedis.OpName;
import redis.clients.jedis.Jedis;
/**
 * Convenience base for single-key Jedis {@link Operation}s. The key is held in
 * exactly one of two forms — {@link String} or raw bytes — depending on which
 * constructor was used; the unused form remains null.
 */
public abstract class BaseKeyOperation<T> implements Operation<Jedis, T> {

    private final String stringKey;
    private final byte[] bytesKey;
    private final OpName opName;

    public BaseKeyOperation(final String k, final OpName o) {
        this.stringKey = k;
        this.bytesKey = null;
        this.opName = o;
    }

    public BaseKeyOperation(final byte[] k, final OpName o) {
        this.stringKey = null;
        this.bytesKey = k;
        this.opName = o;
    }

    /** Name of the Redis operation, taken from the {@link OpName} constant. */
    @Override
    public String getName() {
        return opName.name();
    }

    /** The key as a String, or null when this operation was built with a binary key. */
    @Override
    public String getStringKey() {
        return stringKey;
    }

    /** The key as raw bytes, or null when this operation was built with a String key. */
    public byte[] getBinaryKey() {
        return bytesKey;
    }
}
| 6,133 |
0 | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis | Create_ds/dyno/dyno-jedis/src/main/java/com/netflix/dyno/jedis/operation/MultiKeyOperation.java | package com.netflix.dyno.jedis.operation;
import com.netflix.dyno.connectionpool.Operation;
import com.netflix.dyno.jedis.OpName;
import redis.clients.jedis.Jedis;
import java.util.List;
/**
 * A poor man's solution for multi-key operations. This is similar to
 * {@code BaseKeyOperation} except that it takes a list of keys as argument. For
 * token-aware routing only the first key in the list is used; ideally this
 * would be a scatter/gather.
 */
public abstract class MultiKeyOperation<T> implements Operation<Jedis, T> {

    private final List<String> keys;
    private final List<byte[]> binaryKeys;
    private final OpName op;

    /**
     * @param keys keys of the operation; the runtime type of the first element
     *             decides whether the list holds String or binary keys. A null,
     *             empty, or unrecognized list leaves both key lists null.
     *             (Signature widened from raw {@code List} to {@code List<?>};
     *             identical erasure, so source- and binary-compatible.)
     * @param o    operation name constant
     */
    public MultiKeyOperation(final List<?> keys, final OpName o) {
        final Object firstKey = (keys != null && !keys.isEmpty()) ? keys.get(0) : null;

        if (firstKey instanceof String) { // string keys
            @SuppressWarnings("unchecked")
            final List<String> stringKeys = (List<String>) keys;
            this.keys = stringKeys;
            this.binaryKeys = null;
        } else if (firstKey instanceof byte[]) { // binary keys
            @SuppressWarnings("unchecked")
            final List<byte[]> byteKeys = (List<byte[]>) keys;
            this.keys = null;
            this.binaryKeys = byteKeys;
        } else { // null, empty, or unsupported element type
            this.keys = null;
            this.binaryKeys = null;
        }
        this.op = o;
    }

    @Override
    public String getName() {
        return op.name();
    }

    /**
     * Sends back only the first key of the multi key operation.
     * @return the first String key, or null when none is set
     */
    @Override
    public String getStringKey() {
        return (this.keys != null) ? this.keys.get(0) : null;
    }

    /** @return the first binary key, or null when none is set */
    public byte[] getBinaryKey() {
        return (binaryKeys != null) ? binaryKeys.get(0) : null;
    }
}
| 6,134 |
0 | Create_ds/dyno/dyno-client/src/main/java/com/netflix | Create_ds/dyno/dyno-client/src/main/java/com/netflix/dyno/DynoClientModule.java | package com.netflix.dyno;
import com.google.inject.AbstractModule;
import com.google.inject.Singleton;
import com.netflix.dyno.jedis.DynoJedisModule;
/**
 * Guice module wiring up the Dyno client; simply installs {@link DynoJedisModule}.
 * Modules are deduplicated by type, hence the type-based equals/hashCode.
 */
@Singleton
public class DynoClientModule extends AbstractModule {

    public DynoClientModule() {
        // Stateless: identity is the module's type (see equals/hashCode).
    }

    @Override
    protected void configure() {
        install(new DynoJedisModule());
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        return getClass() == obj.getClass();
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}
| 6,135 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/util/TestClass.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.util;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.LinkedList;
import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
/**
* A class that holds some helper methods for running tests that test classes
* should inherit from.
*/
/**
 * A class that holds some helper methods for running tests that test classes
 * should inherit from: reading/writing private fields, invoking private
 * constructors and methods, and asserting on thrown exceptions.
 */
public class TestClass {

    /** Reads field {@code fieldName} declared directly on {@code clazz} from {@code o}. */
    public Object getFieldFrom(Object o, String fieldName, Class<?> clazz) throws Exception {
        Field field = clazz.getDeclaredField(fieldName);
        field.setAccessible(true);
        return field.get(o);
    }

    /**
     * Reads {@code fieldName} from {@code o}, walking up the class hierarchy until
     * the field is found. Throws RuntimeException when the field exists nowhere in
     * the hierarchy; returns null on any other reflective failure.
     */
    public Object getFieldFrom(Object o, String fieldName) {
        Class<?> clazz = o.getClass();
        // Fixed: the previous "boolean isDone" flag was never assigned, so the
        // loop condition was dead code — the loop only exits via return or throw.
        while (true) {
            try {
                return getFieldFrom(o, fieldName, clazz);
            } catch (NoSuchFieldException e) {
                if (clazz.equals(Object.class)) {
                    throw new RuntimeException(e);
                } else {
                    clazz = clazz.getSuperclass();
                }
            } catch (RuntimeException e) {
                throw e;
            } catch (Exception e) {
                e.printStackTrace();
                return null;
            }
        }
    }

    /** Writes {@code value} into field {@code fieldName} declared directly on {@code clazz}. */
    public void setFieldFrom(Object o, String fieldName, Object value, Class<?> clazz) throws Exception {
        Field field = clazz.getDeclaredField(fieldName);
        field.setAccessible(true);
        field.set(o, value);
    }

    /**
     * Writes {@code value} into {@code fieldName} on {@code o}, walking up the class
     * hierarchy. Unlike the getter, a missing field is silently ignored once the
     * top of the hierarchy is reached.
     */
    public void setFieldFrom(Object o, String fieldName, Object value) {
        Class<?> clazz = o.getClass();
        boolean isDone = false;
        while (!isDone) {
            try {
                setFieldFrom(o, fieldName, value, clazz);
                isDone = true;
            } catch (NoSuchFieldException e) {
                if (clazz.equals(Object.class)) {
                    isDone = true;
                } else {
                    clazz = clazz.getSuperclass();
                }
            } catch (RuntimeException e) {
                throw e;
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Finds a declared constructor of {@code clazz} taking {@code n} arguments.
     * NOTE(review): when no such constructor exists, this returns the last
     * constructor examined rather than null — preserved for compatibility.
     */
    public Constructor<?> getConstructorWithNArguments(Class<?> clazz, int n) {
        Constructor<?>[] ctors = clazz.getDeclaredConstructors();
        Constructor<?> ctor = null;
        for (int i = 0; i < ctors.length; i++) {
            ctor = ctors[i];
            if (ctor.getGenericParameterTypes().length == n) {
                break;
            }
        }
        return ctor;
    }

    /** Invokes {@code ctor} (made accessible) with {@code initargs}; returns null on failure. */
    public Object newInstance(Constructor<?> ctor, Object... initargs) {
        try {
            ctor.setAccessible(true);
            return ctor.newInstance(initargs);
        } catch (RuntimeException e) {
            throw e;
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /** Instantiates {@code clazz} via the constructor matching the argument count. */
    public Object callConstructorWithArguments(Class<?> clazz, Object... initargs) {
        Constructor<?> ctor = getConstructorWithNArguments(clazz, initargs.length);
        return newInstance(ctor, initargs);
    }

    /**
     * Like {@link #callConstructorWithArguments(Class, Object...)} but matches a
     * constructor with {@code argsOffset} extra parameters (e.g. synthetic args).
     */
    public Object callConstructorWithArguments(int argsOffset, Class<?> clazz, Object... initargs) {
        Constructor<?> ctor = getConstructorWithNArguments(clazz, initargs.length + argsOffset);
        return newInstance(ctor, initargs);
    }

    /** Builds a GetSecretValueRequest for the given secret name. */
    public GetSecretValueRequest requestWithName(String secretName) {
        return GetSecretValueRequest.builder().secretId(secretName).build();
    }

    /**
     * Invokes {@code methodName} on {@code object} reflectively, matching by name
     * and argument count across the whole class hierarchy. Returns null on
     * reflective failure.
     */
    public Object callMethodWithArguments(Object object, String methodName, Object... args) {
        try {
            LinkedList<Method> allMethods = new LinkedList<>();
            Class<?> clazz = object.getClass();
            while (!clazz.equals(Object.class)) {
                Method[] methods = clazz.getDeclaredMethods();
                allMethods.addAll(Arrays.asList(methods));
                clazz = clazz.getSuperclass();
            }
            Method correctMethod = null;
            for (Method method : allMethods) {
                if (method.getName().equals(methodName) && method.getParameterCount() == args.length) {
                    correctMethod = method;
                    break;
                }
            }
            if (correctMethod == null) {
                throw new NoSuchMethodException("No appropriate method.");
            }
            correctMethod.setAccessible(true);
            return correctMethod.invoke(object, args);
        } catch (RuntimeException e) {
            throw e;
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /** Runnable variant whose body may throw a checked exception. */
    public interface throwingRunnable {
        void run() throws Exception;
    }

    /** Asserts that {@code code} throws an exception assignable to {@code exception}. */
    public void assertThrows(Class<? extends Exception> exception, throwingRunnable code) {
        try {
            code.run();
            throw new RuntimeException("Should have thrown a " + exception.getName() + " but threw nothing.");
        } catch (Exception e) {
            if (!exception.isAssignableFrom(e.getClass())) {
                e.printStackTrace();
                throw new RuntimeException(
                        "Should have thrown a " + exception.getName() + " but threw " + e.getClass().getName());
            }
        }
    }

    /** Asserts that {@code code} throws exactly the given exception instance (by equals). */
    public void assertThrows(Exception exception, throwingRunnable code) {
        try {
            code.run();
            throw new RuntimeException("Should have thrown a " + exception.getMessage() + " but threw nothing.");
        } catch (Exception e) {
            if (!exception.equals(e)) {
                e.printStackTrace();
                throw new RuntimeException(
                        "Should have thrown a " + exception.getMessage() + " but threw " + e.getClass().getName());
            }
        }
    }

    /** Asserts that {@code code} completes without throwing. */
    public void assertNotThrows(throwingRunnable code) {
        try {
            code.run();
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException("Should not have thrown, but threw " + e.getClass().getName());
        }
    }
}
| 6,136 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/util/SQLExceptionUtilsTest.java | package com.amazonaws.secretsmanager.util;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Test;
/** Unit tests for {@code SQLExceptionUtils.unwrapAndCheckForCode}. */
public class SQLExceptionUtilsTest {

    /** A null throwable never matches any vendor code. */
    @Test
    public void test_unwrapAndCheckForCode_nullReturnsFalse() {
        assertFalse(SQLExceptionUtils.unwrapAndCheckForCode(null, 1045));
    }

    /** The target code is found on a nested SQLException cause. */
    @Test
    public void test_unwrapAndCheckForCode_wrappedException_returnsTrue() {
        final SQLException inner = new SQLException("", "", 1045);
        final SQLException outer = new SQLException("", "", 0, inner);
        assertTrue(SQLExceptionUtils.unwrapAndCheckForCode(outer, 1045));
    }

    /** A nested cause with a different code does not match. */
    @Test
    public void test_unwrapAndCheckForCode_wrappedException_returnsFalse() {
        final SQLException inner = new SQLException("", "", 42);
        final SQLException outer = new SQLException("", "", 0, inner);
        assertFalse(SQLExceptionUtils.unwrapAndCheckForCode(outer, 1045));
    }

    /** A cycle in the cause chain must terminate (and not match a foreign code). */
    @Test
    public void test_unwrapAndCheckForCode_loopInWrappedExceptions_returnsFalse() {
        final SQLException first = new SQLException("", "", 42);
        final SQLException second = new SQLException("", "", 0, first);
        first.initCause(second);
        assertFalse(SQLExceptionUtils.unwrapAndCheckForCode(first, 1046));
    }

    /** A non-SQL exception in the middle of the chain does not hide a deeper match. */
    @Test
    public void test_unwrapAndCheckForCode_nonSqlException_parentStillGetsFound() {
        final SQLException target = new SQLException("", "", 1046);
        final Exception middle = new Exception("test", target);
        final SQLException top = new SQLException("", "", 42, middle);
        assertTrue(SQLExceptionUtils.unwrapAndCheckForCode(top, 1046));
    }

    /** A plain Exception wrapping a matching SQLException is unwrapped. */
    @Test
    public void test_unwrapAndCheckForCode_nonSqlExceptionWithParent_parentGetsFound() {
        final Exception inner = new SQLException("", "", 1046);
        final Exception outer = new Exception(inner);
        assertTrue(SQLExceptionUtils.unwrapAndCheckForCode(outer, 1046));
    }

    /** A bare non-SQL exception with no cause never matches. */
    @Test
    public void test_unwrapAndCheckForCode_nonSqlException_returnsFalse() {
        final Exception plain = new Exception();
        assertFalse(SQLExceptionUtils.unwrapAndCheckForCode(plain, 1046));
    }
}
| 6,137 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/util/ConfigTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.util.NoSuchElementException;
import java.util.Properties;
import org.junit.Test;
/**
* Tests for the Config.
*/
public class ConfigTest extends TestClass {
/*******************************************************************************************************************
* loadConfigFrom Tests
*
* File exists
* File does not exist (should just load from system properties)
******************************************************************************************************************/
    // Missing file: config falls back to system properties.
    @Test
    public void test_loadConfigFrom_badFile() {
        System.setProperty("test", "asdfasdf");
        assertNotThrows(() -> {
            Config config = Config.loadConfigFrom("asdfasdf");
            assertEquals("asdfasdf", config.getStringPropertyWithDefault("test", null));
        });
    }
    // Existing file: properties are read from it.
    @Test
    public void test_loadConfigFrom_goodFile() {
        assertNotThrows(() -> {
            Config config = Config.loadConfigFrom(Config.CONFIG_FILE_NAME);
            assertEquals("asfd", config.getStringPropertyWithDefault("testProperty", null));
        });
    }
/*******************************************************************************************************************
* loadMainConfig Tests
*
* Just test happy path; it calls loadConfigFrom
******************************************************************************************************************/
    // Happy path only; loadMainConfig delegates to loadConfigFrom.
    @Test
    public void test_loadMainConfig_goodFile() {
        assertNotThrows(() -> {
            Config config = Config.loadMainConfig();
            assertEquals("asfd", config.getStringPropertyWithDefault("testProperty", null));
        });
    }
/*******************************************************************************************************************
* getSubconfig Tests
*
* No subproperties
* Has subproperties and null prefix
* Has subproperties and nonnull prefix
******************************************************************************************************************/
    // No keys under the prefix: getSubconfig returns null.
    @Test
    public void test_getSubconfig_noSubproperies() {
        Properties props = new Properties();
        Config config = (Config) callConstructorWithArguments(Config.class, null, props);
        assertEquals(null, config.getSubconfig("asdf"));
    }
    // Keys under the prefix, parent has no prefix: subconfig prefix is just "asdf".
    @Test
    public void test_getSubconfig_hasSubproperties_nullPrefix() {
        Properties props = new Properties();
        props.setProperty("asdf.hey", "hello");
        Config config = (Config) callConstructorWithArguments(Config.class, null, props);
        Config subconfig = config.getSubconfig("asdf");
        assertFalse(subconfig.equals(null));
        assertEquals("hello", subconfig.getStringPropertyWithDefault("hey", null));
        assertEquals("asdf", getFieldFrom(subconfig, "prefix"));
    }
    // Keys under the prefix, parent prefixed "top": subconfig prefix is "top.asdf".
    @Test
    public void test_getSubconfig_hasSubproperties_nonnullPrefix() {
        Properties props = new Properties();
        props.setProperty("asdf.hey", "hello");
        Config config = (Config) callConstructorWithArguments(Config.class, "top", props);
        Config subconfig = config.getSubconfig("asdf");
        assertFalse(subconfig.equals(null));
        assertEquals("hello", subconfig.getStringPropertyWithDefault("hey", null));
        assertEquals("top.asdf", getFieldFrom(subconfig, "prefix"));
    }
/*******************************************************************************************************************
* fullPropertyName Tests
*
* null prefix
* nonnull prefix
******************************************************************************************************************/
    // No prefix: the property name is returned unchanged.
    @Test
    public void test_fullPropertyName_nullPrefix() {
        Properties props = new Properties();
        props.setProperty("asdf.hey", "hello");
        Config config = (Config) callConstructorWithArguments(Config.class, null, props);
        assertEquals("asdf.hey", config.fullPropertyName("asdf.hey"));
    }
    // With prefix: the prefix is prepended with a dot separator.
    @Test
    public void test_fullPropertyName_nonnullPrefix() {
        Properties props = new Properties();
        props.setProperty("asdf.hey", "hello");
        Config config = (Config) callConstructorWithArguments(Config.class, "top", props);
        assertEquals("top.asdf.hey", config.fullPropertyName("asdf.hey"));
    }
/*******************************************************************************************************************
* getStringPropertyWithDefault Tests
*
* has it
* doesn't have it
******************************************************************************************************************/
@Test
public void test_getStringPropertyWithDefault_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "hello");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals("hello", config.getStringPropertyWithDefault("hey", "ho"));
}
@Test
public void test_getStringPropertyWithDefault_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals("ho", config.getStringPropertyWithDefault("hey", "ho"));
}
/*******************************************************************************************************************
* getIntPropertyWithDefault Tests
*
* has it
* doesn't have it
* NumberFormatException
******************************************************************************************************************/
@Test
public void test_getIntPropertyWithDefault_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "2");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(2, config.getIntPropertyWithDefault("hey", 3));
}
@Test
public void test_getIntPropertyWithDefault_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(3, config.getIntPropertyWithDefault("hey", 3));
}
@Test
public void test_getIntPropertyWithDefault_propertySetBadly() {
Properties props = new Properties();
props.setProperty("hey", "asdf");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(PropertyException.class, () -> config.getIntPropertyWithDefault("hey", 3));
}
/*******************************************************************************************************************
* getLongPropertyWithDefault Tests
*
* has it
* doesn't have it
* NumberFormatException
******************************************************************************************************************/
@Test
public void test_getLongPropertyWithDefault_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "2");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(2, config.getLongPropertyWithDefault("hey", 3));
}
@Test
public void test_getLongPropertyWithDefault_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(3, config.getLongPropertyWithDefault("hey", 3));
}
@Test
public void test_getLongPropertyWithDefault_propertySetBadly() {
Properties props = new Properties();
props.setProperty("hey", "asdf");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(PropertyException.class, () -> config.getLongPropertyWithDefault("hey", 3));
}
/*******************************************************************************************************************
* getClassPropertyWithDefault Tests
*
* has it
* doesn't have it
* ClassNotFoundException
******************************************************************************************************************/
@Test
public void test_getClassPropertyWithDefault_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "com.amazonaws.secretsmanager.util.ConfigTest");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(this.getClass(), config.getClassPropertyWithDefault("hey", Object.class));
}
@Test
public void test_getClassPropertyWithDefault_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(Object.class, config.getClassPropertyWithDefault("hey", Object.class));
}
@Test
public void test_getClassPropertyWithDefault_propertySetBadly() {
Properties props = new Properties();
props.setProperty("hey", "comm.amazonaws.secretsmanager.util.ConfigTest");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(PropertyException.class, () -> config.getClassPropertyWithDefault("hey", Object.class));
}
/*******************************************************************************************************************
* getRequiredStringProperty Tests
*
* has it
* doesn't have it
******************************************************************************************************************/
@Test
public void test_getRequiredStringProperty_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "hello");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals("hello", config.getRequiredStringProperty("hey"));
}
@Test
public void test_getRequiredStringProperty_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(NoSuchElementException.class, () -> config.getRequiredStringProperty("hey"));
}
/*******************************************************************************************************************
* getRequiredIntProperty Tests
*
* has it
* doesn't have it
* NumberFormatException
******************************************************************************************************************/
@Test
public void test_getRequiredIntProperty_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "2");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(2, config.getRequiredIntProperty("hey"));
}
@Test
public void test_getRequiredIntProperty_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(NoSuchElementException.class, () -> config.getRequiredIntProperty("hey"));
}
@Test
public void test_getRequiredIntProperty_propertySetBadly() {
Properties props = new Properties();
props.setProperty("hey", "asdf");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(PropertyException.class, () -> config.getRequiredIntProperty("hey"));
}
/*******************************************************************************************************************
* getRequiredLongProperty Tests
*
* has it
* doesn't have it
* NumberFormatException
******************************************************************************************************************/
@Test
public void test_getRequiredLongProperty_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "2");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(2, config.getRequiredLongProperty("hey"));
}
@Test
public void test_getRequiredLongProperty_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(NoSuchElementException.class, () -> config.getRequiredLongProperty("hey"));
}
@Test
public void test_getRequiredLongProperty_propertySetBadly() {
Properties props = new Properties();
props.setProperty("hey", "asdf");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(PropertyException.class, () -> config.getRequiredLongProperty("hey"));
}
/*******************************************************************************************************************
* getRequiredClassProperty Tests
*
* has it
* doesn't have it
* ClassNotFoundException
******************************************************************************************************************/
@Test
public void test_getRequiredClassProperty_propertySet() {
Properties props = new Properties();
props.setProperty("hey", "com.amazonaws.secretsmanager.util.ConfigTest");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertEquals(this.getClass(), config.getRequiredClassProperty("hey"));
}
@Test
public void test_getRequiredClassProperty_propertyNotSet() {
Properties props = new Properties();
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(NoSuchElementException.class, () -> config.getRequiredClassProperty("hey"));
}
@Test
public void test_getRequiredClassProperty_propertySetBadly() {
Properties props = new Properties();
props.setProperty("hey", "comm.amazonaws.secretsmanager.util.ConfigTest");
Config config = (Config) callConstructorWithArguments(Config.class, null, props);
assertThrows(PropertyException.class, () -> config.getRequiredClassProperty("hey"));
}
}
| 6,138 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/util/JDBCSecretCacheBuilderProviderTest.java | package com.amazonaws.secretsmanager.util;
import static com.amazonaws.secretsmanager.util.JDBCSecretCacheBuilderProvider.PROPERTY_VPC_ENDPOINT_REGION;
import static com.amazonaws.secretsmanager.util.JDBCSecretCacheBuilderProvider.PROPERTY_VPC_ENDPOINT_URL;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.EnvironmentVariables;
import com.amazonaws.secretsmanager.sql.AWSSecretsManagerDriver;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
/**
 * Tests for {@link JDBCSecretCacheBuilderProvider}: region resolution from the config file,
 * the region environment variable, and VPC endpoint overrides, plus the priority between them.
 *
 * <p>Fix: the "null config property" validation test previously stubbed an empty string
 * (duplicating the empty-property test); it now stubs {@code null} as its name requires.
 * Assertions were also normalized to the JUnit expected-first argument order.
 */
public class JDBCSecretCacheBuilderProviderTest {

    @Rule
    public final EnvironmentVariables environmentVariables = new EnvironmentVariables();

    /**
     * SetRegion Tests.
     */
    @Test
    public void test_setRegion_configFileProperty() {
        Config configProvider = mock(Config.class);
        String regionName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "."
                + JDBCSecretCacheBuilderProvider.PROPERTY_REGION;
        when(configProvider.getStringPropertyWithDefault(regionName, null)).thenReturn("us-west-2");

        SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();

        assertEquals(Region.US_WEST_2, client.serviceClientConfiguration().region());
    }

    @Test
    public void test_setRegion_environmentVariable() {
        Config configProvider = mock(Config.class);
        String environmentRegionName = JDBCSecretCacheBuilderProvider.REGION_ENVIRONMENT_VARIABLE;
        environmentVariables.set(environmentRegionName, "us-east-1");
        // Sanity check that the rule actually exported the variable.
        assertEquals("us-east-1", System.getenv(environmentRegionName));

        SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();

        assertEquals(Region.US_EAST_1, client.serviceClientConfiguration().region());
    }

    @Test
    public void test_setRegion_vpcEndpoint() {
        Config configProvider = mock(Config.class);
        String vpcEndpointUrlName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_URL;
        String vpcEndpointRegion = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_REGION;
        String vpcEndpointUrlString = "https://asdf.us-west-2.amazonaws.com";
        when(configProvider.getStringPropertyWithDefault(vpcEndpointUrlName, null)).thenReturn(vpcEndpointUrlString);
        when(configProvider.getStringPropertyWithDefault(vpcEndpointRegion, null)).thenReturn("ap-southeast-3");

        SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();

        // Both the endpoint override and its signing region come from the VPC endpoint properties.
        assertEquals(vpcEndpointUrlString, client.serviceClientConfiguration().endpointOverride().get().toString());
        assertEquals(Region.AP_SOUTHEAST_3, client.serviceClientConfiguration().region());
    }

    @Test
    public void test_setRegion_defaultsToEnv() {
        // With no configuration, region resolution falls through to the SDK default provider
        // chain. NOTE(review): if that chain can resolve a region (e.g. on EC2), build()
        // succeeds and this test asserts nothing — it is intentionally lenient.
        try {
            new JDBCSecretCacheBuilderProvider().build().build();
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }

    /**
     * SetRegion priority tests.
     */
    @Test
    public void test_regionSelectionOrder_prefersVpcEndpointOverEverything() {
        Config configProvider = mock(Config.class);

        // Arrange so all properties return something valid.
        String regionName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "."
                + JDBCSecretCacheBuilderProvider.PROPERTY_REGION;
        String vpcEndpointUrlName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_URL;
        String vpcEndpointRegion = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_REGION;
        String environmentRegionName = JDBCSecretCacheBuilderProvider.REGION_ENVIRONMENT_VARIABLE;
        String vpcEndpointUrlString = "https://1234.secretsmanager.amazonaws.com";

        // Arrange the return values when the properties are requested.
        environmentVariables.set(environmentRegionName, "us-east-2");
        when(configProvider.getStringPropertyWithDefault(regionName, null)).thenReturn("us-east-1");
        when(configProvider.getStringPropertyWithDefault(vpcEndpointUrlName, null))
                .thenReturn(vpcEndpointUrlString);
        when(configProvider.getStringPropertyWithDefault(vpcEndpointRegion, null)).thenReturn("us-west-2");

        // Act: Build our client
        SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();

        // Assert: the VPC endpoint region wins over both the env var and the config property.
        assertNotEquals(Region.US_EAST_2, client.serviceClientConfiguration().region());
        assertNotEquals(Region.US_EAST_1, client.serviceClientConfiguration().region());
        assertEquals(Region.US_WEST_2, client.serviceClientConfiguration().region());
        assertEquals(vpcEndpointUrlString,
                client.serviceClientConfiguration().endpointOverride().get().toString());
    }

    @Test
    public void test_regionSelectionOrder_prefersEnvironmentVarOverConfig() {
        Config configProvider = mock(Config.class);
        String regionName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "."
                + JDBCSecretCacheBuilderProvider.PROPERTY_REGION;
        String environmentRegionName = JDBCSecretCacheBuilderProvider.REGION_ENVIRONMENT_VARIABLE;
        environmentVariables.set(environmentRegionName, "eu-west-3");
        when(configProvider.getStringPropertyWithDefault(regionName, null)).thenReturn("us-east-2");

        SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();

        assertNotEquals(Region.US_EAST_2, client.serviceClientConfiguration().region());
        assertEquals(Region.EU_WEST_3, client.serviceClientConfiguration().region());
    }

    /**
     * Variables must be correctly set
     */
    @Test
    public void test_settingValidation_emptyConfigPropertyIgnored() {
        Config configProvider = mock(Config.class);
        String regionName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "."
                + JDBCSecretCacheBuilderProvider.PROPERTY_REGION;
        when(configProvider.getStringPropertyWithDefault(regionName, null)).thenReturn("");
        try {
            new JDBCSecretCacheBuilderProvider(configProvider).build().build();
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }

    @Test
    public void test_settingValidation_nullConfigPropertyIgnored() {
        Config configProvider = mock(Config.class);
        String regionName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "."
                + JDBCSecretCacheBuilderProvider.PROPERTY_REGION;
        // Fixed: previously stubbed "" here, which merely duplicated the empty-property test.
        // A null region property must also be ignored by the provider.
        when(configProvider.getStringPropertyWithDefault(regionName, null)).thenReturn(null);
        try {
            new JDBCSecretCacheBuilderProvider(configProvider).build().build();
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }

    @Test
    public void test_settingValidation_emptyEnvironmentVariableIgnored() {
        Config configProvider = mock(Config.class);
        String environmentRegionName = JDBCSecretCacheBuilderProvider.REGION_ENVIRONMENT_VARIABLE;
        environmentVariables.set(environmentRegionName, "");
        try {
            new JDBCSecretCacheBuilderProvider(configProvider).build().build();
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }

    @Test
    public void test_settingValidation_nullEnvironmentVariableIgnored() {
        Config configProvider = mock(Config.class);
        String environmentRegionName = JDBCSecretCacheBuilderProvider.REGION_ENVIRONMENT_VARIABLE;
        environmentVariables.clear(environmentRegionName);
        try {
            new JDBCSecretCacheBuilderProvider(configProvider).build().build();
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }

    @Test
    public void test_settingValidation_emptyVpcIgnored() {
        Config configProvider = mock(Config.class);
        String vpcEndpointUrlName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_URL;
        String vpcEndpointRegion = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_REGION;
        when(configProvider.getStringPropertyWithDefault(vpcEndpointUrlName, null)).thenReturn("");
        when(configProvider.getStringPropertyWithDefault(vpcEndpointRegion, null)).thenReturn("");
        try {
            // Empty VPC settings must not install an endpoint override.
            SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();
            assertTrue(client.serviceClientConfiguration().endpointOverride().isEmpty());
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }

    @Test
    public void test_settingValidation_nullVpcIgnored() {
        Config configProvider = mock(Config.class);
        String vpcEndpointUrlName = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_URL;
        String vpcEndpointRegion = AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_REGION;
        when(configProvider.getStringPropertyWithDefault(vpcEndpointUrlName, null)).thenReturn(null);
        when(configProvider.getStringPropertyWithDefault(vpcEndpointRegion, null)).thenReturn(null);
        try {
            // Null VPC settings must not install an endpoint override.
            SecretsManagerClient client = new JDBCSecretCacheBuilderProvider(configProvider).build().build();
            assertTrue(client.serviceClientConfiguration().endpointOverride().isEmpty());
        } catch (SdkClientException e) {
            assertTrue(e.getMessage().startsWith("Unable to load region from any of the providers in the chain"));
        }
    }
}
| 6,139 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerDummyDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
/**
 * Minimal {@link AWSSecretsManagerDriver} subclass for unit tests. It wraps the in-package
 * {@code DummyDriver} and lets tests dictate whether an exception counts as an
 * authentication failure via the {@link #exceptionIsDueToAuth} flag.
 */
public class AWSSecretsManagerDummyDriver extends AWSSecretsManagerDriver {

    public static final String SUBPREFIX = "dummy";

    /** Test hook: the value {@link #isExceptionDueToAuthenticationError} should report. */
    public boolean exceptionIsDueToAuth;

    /** Creates the driver with a secret cache built with default options. */
    public AWSSecretsManagerDummyDriver() {
        super();
    }

    /**
     * Creates the driver around the given secret cache.
     *
     * @param cache Secret cache to use
     */
    public AWSSecretsManagerDummyDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Creates the driver, instantiating the secret cache from the given client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerDummyDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Creates the driver, instantiating the secret cache from the given client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerDummyDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Creates the driver, instantiating the secret cache from the given configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerDummyDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        // Entirely controlled by the test via the public flag above.
        return exceptionIsDueToAuth;
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        // Fixed sentinel URL; the inputs are deliberately ignored in the dummy driver.
        return "mysuperconnectionurl";
    }

    @Override
    public String getDefaultDriverClass() {
        return "com.amazonaws.secretsmanager.sql.DummyDriver";
    }
}
| 6,140 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerOracleDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.util.TestClass;
/**
 * Tests for the Oracle Driver.
 *
 * <p>Fix: assertEquals calls in the constructUrl/getDefaultDriverClass tests passed the
 * actual value first; arguments are now in JUnit's (expected, actual) order so failure
 * messages report correctly.
 */
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("com.amazonaws.secretsmanager.sql.AWSSecretsManagerOracleDriver")
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerOracleDriverTest extends TestClass {

    private AWSSecretsManagerOracleDriver sut;

    @Mock
    private SecretCache cache;

    @Before
    public void setup() {
        // Point the wrapped driver at the in-package dummy so no real Oracle driver is needed.
        System.setProperty("drivers.oracle.realDriverClass", "com.amazonaws.secretsmanager.sql.DummyDriver");
        MockitoAnnotations.initMocks(this);
        try {
            sut = new AWSSecretsManagerOracleDriver(cache);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void test_getPropertySubprefix() {
        assertEquals("oracle", sut.getPropertySubprefix());
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsTrue_correctExceptions() {
        // Error codes 17079, 1017 and 9911 are the ones the driver treats as auth failures.
        SQLException e = new SQLException("", "", 17079);
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
        e = new SQLException("", "", 1017);
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
        e = new SQLException("", "", 9911);
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_wrongSQLException() {
        SQLException e = new SQLException("", "", 1046);
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_runtimeException() {
        RuntimeException e = new RuntimeException("asdf");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_constructUrl() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", "dev");
        assertEquals("jdbc:oracle:thin:@//test-endpoint:1234/dev", url);
    }

    @Test
    public void test_constructUrlNullPort() {
        // A null port is simply omitted from the URL.
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", null, "dev");
        assertEquals("jdbc:oracle:thin:@//test-endpoint/dev", url);
    }

    @Test
    public void test_constructUrlNullDatabase() {
        // A null database name is simply omitted from the URL.
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", null);
        assertEquals("jdbc:oracle:thin:@//test-endpoint:1234", url);
    }

    @Test
    public void test_getDefaultDriverClass() {
        // With no override property set, the wrapped driver class falls back to the default.
        System.clearProperty("drivers.oracle.realDriverClass");
        AWSSecretsManagerOracleDriver sut2 = new AWSSecretsManagerOracleDriver(cache);
        assertEquals(sut2.getDefaultDriverClass(), getFieldFrom(sut2, "realDriverClass"));
    }
}
| 6,141 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import com.amazonaws.secretsmanager.util.TestClass;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
/**
* Tests for AWSSecretsManagerDriver. Uses a config file in the resources folder just to make sure it can read from
* the file.
*/
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor({"com.amazonaws.secretsmanager.sql.*"})
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerDriverTest extends TestClass {
    private AWSSecretsManagerDummyDriver sut;
    // Secret IDs with special behavior in the mocked SecretCache; see setup() for the mapping.
    private static final String VALID_USER = "VALID_USER";
    private static final String INVALID_USER = "INVALID_USER";
    private static final String BAD_FORMAT_SECRET = "BAD_FORMAT_SECRET";
    private static final String NEEDS_REFRESH_SECRET = "NEEDS_REFRESH_SECRET";
    private static final String BAD_REFRESH_SECRET = "BAD_REFRESH_SECRET";
    private static final String INVALID_AFTER_REFRESH = "INVALID_AFTER_REFRESH";
    @Mock
    private SecretCache cache;
    // Tracks whether the mocked cache has successfully refreshed; flipped by the refreshNow stub.
    boolean hasRefreshed;
    /**
     * Wires a mock SecretCache whose getSecretString/refreshNow behavior is keyed off the
     * secret-ID constants above, then registers a fresh dummy driver with the DriverManager.
     */
    @Before
    public void setup() throws InterruptedException {
        System.clearProperty("drivers.dummy.realDriverClass");
        // Instantiate mocks
        hasRefreshed = false;
        MockitoAnnotations.initMocks(this);
        Mockito.when(cache.getSecretString(Mockito.any(String.class))).thenAnswer(new Answer<String>() {
            @Override
            public String answer(InvocationOnMock invocation) throws Throwable {
                Object[] arguments = invocation.getArguments();
                if (arguments != null && arguments.length > 0 && arguments[0] != null){
                    String secretId = (String) arguments[0];
                    String returnUser = secretId;
                    if (INVALID_USER.equals(secretId)) {
                        // Simulates a secret that does not exist.
                        return null;
                    } else if (BAD_FORMAT_SECRET.equals(secretId)) {
                        // Simulates a secret whose value is not valid JSON.
                        return "NotJSONFormat";
                    } else if (NEEDS_REFRESH_SECRET.equals(secretId) || BAD_REFRESH_SECRET.equals(secretId)) {
                        // Stale credentials until a refresh succeeds, then valid.
                        returnUser = hasRefreshed ? VALID_USER : DummyDriver.SQL_ERROR_USERNAME;
                    } else if (INVALID_AFTER_REFRESH.equals(secretId)) {
                        // Always yields credentials the dummy driver rejects.
                        returnUser = DummyDriver.SQL_ERROR_USERNAME;
                    }
                    // Secret value is JSON; password and host are just the secret ID itself.
                    return String.format("{\"username\": \"%s\",\n\"password\": \"%s\",\n\"host\": \"%s\"}", returnUser, secretId, secretId);
                }
                return null;
            }
        });
        Mockito.when(cache.refreshNow(Mockito.any(String.class))).thenAnswer(new Answer<Boolean>() {
            @Override
            public Boolean answer(InvocationOnMock invocation) throws Throwable {
                Object[] arguments = invocation.getArguments();
                if (arguments != null && arguments.length > 0 && arguments[0] != null){
                    String secretId = (String) arguments[0];
                    if (BAD_REFRESH_SECRET.equals(secretId)) {
                        // Simulates a refresh that fails to produce new credentials.
                        return false;
                    }
                    hasRefreshed = true;
                    return true;
                }
                return false;
            }
        });
        // Instantiate the driver
        sut = new AWSSecretsManagerDummyDriver(cache);
        DummyDriver.reset();
        try {
            DriverManager.registerDriver(sut);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    /*******************************************************************************************************************
     * init Tests
     ******************************************************************************************************************/
    @Test
    public void test_init_constructor_null_params() {
        // NOTE(review): these catch blocks swallow every exception and nothing is asserted, so
        // this test passes whether or not the constructors reject null. It only guards against
        // catastrophic failures; consider assertThrows if null arguments must be rejected.
        try {
            new AWSSecretsManagerDummyDriver((SecretsManagerClientBuilder) null);
        } catch (Exception e) {}
        try {
            new AWSSecretsManagerDummyDriver((SecretCacheConfiguration) null);
        } catch (Exception e) {}
        try {
            new AWSSecretsManagerDummyDriver((SecretsManagerClient) null);
        } catch (Exception e) {}
    }
    @Test
    public void test_init_works_realDriverFromConfig() {
        // The system property drivers.dummy.realDriverClass overrides the wrapped driver class.
        System.setProperty("drivers.dummy.realDriverClass", "some.other.class");
        AWSSecretsManagerDummyDriver sut2 = new AWSSecretsManagerDummyDriver(cache);
        assertEquals(getFieldFrom(sut2, "realDriverClass"), "some.other.class");
    }
    /*******************************************************************************************************************
     * getWrappedDriver Tests
     ******************************************************************************************************************/
    @Test
    public void test_getWrappedDriver_works_goodDriver() {
        // The wrapper resolves the registered DummyDriver instance.
        assertEquals(DummyDriver.instance, sut.getWrappedDriver());
    }
    @Test
    public void test_getWrappedDriver_throws_badDriver() {
        // An unresolvable real-driver class name makes the wrapper fail fast.
        setFieldFrom(sut, "realDriverClass", "some.bad.class");
        assertThrows(IllegalStateException.class, () -> sut.getWrappedDriver());
    }
/*******************************************************************************************************************
* acceptsURL Tests
******************************************************************************************************************/
@Test
public void test_acceptsURL_throws_nullURL() {
assertThrows(SQLException.class, () -> sut.acceptsURL(null));
}
@Test
public void test_acceptsURL_returnsFalse_wrongURL() {
    // A jdbc-secretsmanager URL is unwrapped and delegated once to the real driver,
    // which rejects anything but "jdbc:expectedUrl".
    assertNotThrows(() -> assertFalse(sut.acceptsURL("jdbc-secretsmanager:wrongUrl")));
    assertEquals(1, DummyDriver.acceptsURLCallCount);
}
@Test
public void test_acceptsURL_returnsTrue_correctURL() {
    // The wrapped URL is delegated exactly once to the real driver, which accepts it.
    assertNotThrows(() -> assertTrue(sut.acceptsURL("jdbc-secretsmanager:expectedUrl")));
    assertEquals(1, DummyDriver.acceptsURLCallCount);
}
@Test
public void test_acceptsURL_returnsFalse_JdbcUrl() {
    // Plain "jdbc:" URLs are refused outright — the real driver is never consulted.
    assertNotThrows(() -> assertFalse(sut.acceptsURL("jdbc:expectedUrl")));
    assertEquals(0, DummyDriver.acceptsURLCallCount);
}
@Test
public void test_acceptsURL_returnsTrue_secretId() {
    // A bare secret ID is accepted without delegating to the real driver.
    assertNotThrows(() -> assertTrue(sut.acceptsURL("someSecretId")));
    assertEquals(0, DummyDriver.acceptsURLCallCount);
}
/*******************************************************************************************************************
* connect Tests
******************************************************************************************************************/
@Test
public void test_connect_throws_nullURL() {
    // connect(null, ...) must fail fast with SQLException.
    assertThrows(SQLException.class, () -> sut.connect(null, null));
}
@Test
public void test_connect_works_nullInfo() {
    // Null Properties must not prevent delegation to the real driver.
    assertNotThrows(() -> sut.connect("jdbc-secretsmanager:expectedUrl", null));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_works_nullUser() {
    // Properties without a "user" entry still delegate exactly once.
    Properties props = new Properties();
    assertNotThrows(() -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_works_valid_url() {
    // Happy path: wrapped URL plus a plain username delegates once to the real driver.
    Properties props = new Properties();
    props.setProperty("user", "user");
    assertNotThrows(() -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_jdbc_returnsNull() throws SQLException {
    // Plain "jdbc:" URLs are not ours to handle, so connect must return null
    // (per the java.sql.Driver contract for unrecognized URLs).
    Connection conn = sut.connect("jdbc:expectedUrl", null);
    // Assert.assertNull gives a clearer failure message than assertEquals(conn, null),
    // and matches the style already used in test_getPropertyInfo_propagatesToRealDriver.
    Assert.assertNull(conn);
}
@Test
public void test_connect_works_secretId() {
    // A bare secret ID as the URL is resolved and delegated once.
    Properties props = new Properties();
    props.setProperty("user", "user");
    assertNotThrows(() -> sut.connect("someSecretId", props));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_works_withSecretRefresh() {
    // First attempt fails as an auth error, the secret is refreshed, and the retry
    // succeeds — hence exactly two delegated connect calls.
    Properties props = new Properties();
    props.setProperty("user", NEEDS_REFRESH_SECRET);
    sut.exceptionIsDueToAuth = true;
    assertNotThrows(() -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(2, DummyDriver.connectCallCount);
}
@Test
public void test_connect_throws_afterRetryMax() {
    // When refreshed credentials keep failing, the driver gives up after the
    // initial attempt plus MAX_RETRY retries, then rethrows SQLException.
    Properties props = new Properties();
    props.setProperty("user", INVALID_AFTER_REFRESH);
    sut.exceptionIsDueToAuth = true;
    assertThrows(SQLException.class, () -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(AWSSecretsManagerDriver.MAX_RETRY + 1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_throws_withBadRefresh() {
    // If refreshing the secret itself fails, no retry is attempted: exactly one delegation.
    Properties props = new Properties();
    props.setProperty("user", BAD_REFRESH_SECRET);
    sut.exceptionIsDueToAuth = true;
    assertThrows(SQLException.class, () -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_rethrowsSQLException_onFailure() {
    // Non-auth SQL failures from the real driver propagate unchanged, with no retry.
    Properties props = new Properties();
    props.setProperty("user", DummyDriver.SQL_ERROR_USERNAME);
    sut.exceptionIsDueToAuth = false;
    assertThrows(SQLException.class, () -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_rethrowsRuntimeException_onFailure() {
    // Runtime failures from the real driver also propagate unchanged, with no retry.
    Properties props = new Properties();
    props.setProperty("user", DummyDriver.RUNTIME_ERROR_USERNAME);
    sut.exceptionIsDueToAuth = false;
    assertThrows(RuntimeException.class, () -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(1, DummyDriver.connectCallCount);
}
@Test
public void test_connect_throws_badSecretId() {
    // An unknown secret ID fails before the real driver is ever invoked.
    Properties props = new Properties();
    props.setProperty("user", "user");
    assertThrows(IllegalArgumentException.class, () -> sut.connect(INVALID_USER, props));
    assertEquals(0, DummyDriver.connectCallCount);
}
@Test
public void test_connect_throws_badlyFormattedSecretId() {
    // A malformed secret value used as the URL aborts before any delegation.
    Properties props = new Properties();
    props.setProperty("user", "user");
    assertThrows(RuntimeException.class, () -> sut.connect(BAD_FORMAT_SECRET, props));
    assertEquals(0, DummyDriver.connectCallCount);
}
@Test
public void test_connect_throws_userBadlyFormattedSecretId() {
    // A malformed secret referenced via the "user" property also aborts before delegation.
    Properties props = new Properties();
    props.setProperty("user", BAD_FORMAT_SECRET);
    assertThrows(RuntimeException.class, () -> sut.connect("jdbc-secretsmanager:expectedUrl", props));
    assertEquals(0, DummyDriver.connectCallCount);
}
/*******************************************************************************************************************
* getMajorVersion Tests
******************************************************************************************************************/
@Test
public void test_getMajorVersion_propagatesToRealDriver() {
    // Version queries are pure pass-throughs to the wrapped driver.
    assertEquals(DummyDriver.GET_MAJOR_VERSION_RETURN_VALUE, sut.getMajorVersion());
    assertEquals(1, DummyDriver.getMajorVersionCallCount);
}
/*******************************************************************************************************************
* getMinorVersion Tests
******************************************************************************************************************/
@Test
public void test_getMinorVersion_propagatesToRealDriver() {
    // Version queries are pure pass-throughs to the wrapped driver.
    assertEquals(DummyDriver.GET_MINOR_VERSION_RETURN_VALUE, sut.getMinorVersion());
    assertEquals(1, DummyDriver.getMinorVersionCallCount);
}
/*******************************************************************************************************************
* getParentLogger Tests
******************************************************************************************************************/
@Test
public void test_getParentLogger_propagatesToRealDriver() {
    // DummyDriver returns a null parent logger. Assert.assertNull reads better than
    // assertEquals(null, ...) and matches the style used elsewhere in this class.
    assertNotThrows(() -> Assert.assertNull(sut.getParentLogger()));
    assertEquals(1, DummyDriver.getParentLoggerCallCount);
}
/*******************************************************************************************************************
* getPropertyInfo Tests
******************************************************************************************************************/
@Test
public void test_getPropertyInfo_propagatesToRealDriver() {
    String param1 = "jdbc-secretsmanager:expectedUrl";
    Properties param2 = new Properties();
    assertNotThrows(() -> Assert.assertNull(sut.getPropertyInfo(param1, param2)));
    assertEquals(1, DummyDriver.getPropertyInfoCallCount);
    // The scheme is unwrapped to plain "jdbc:" before delegation; the Properties
    // object is passed through by reference, untouched.
    String param1Expected = "jdbc:expectedUrl";
    assertEquals(param1Expected, DummyDriver.getPropertyInfoParam1);
    assertSame(param2, DummyDriver.getPropertyInfoParam2);
}
/*******************************************************************************************************************
* jdbcCompliant Tests
******************************************************************************************************************/
@Test
public void test_jdbcCompliant_propagatesToRealDriver() {
    // assertTrue is the idiomatic form of assertEquals(true, ...) for booleans.
    assertTrue(sut.jdbcCompliant());
    assertEquals(1, DummyDriver.jdbcCompliantCallCount);
}
}
| 6,142 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerRedshiftDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.util.TestClass;
/**
* Tests for the Redshift Driver.
*/
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("com.amazonaws.secretsmanager.sql.AWSSecretsManagerRedshiftDriver")
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerRedshiftDriverTest extends TestClass {

    private AWSSecretsManagerRedshiftDriver sut;

    @Mock
    private SecretCache cache;

    @Before
    public void setup() {
        // Route the wrapped driver to the in-repo test double before building the SUT.
        System.setProperty("drivers.redshift.realDriverClass", "com.amazonaws.secretsmanager.sql.DummyDriver");
        MockitoAnnotations.initMocks(this);
        try {
            sut = new AWSSecretsManagerRedshiftDriver(cache);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void test_getPropertySubprefix() {
        assertEquals("redshift", sut.getPropertySubprefix());
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsTrue_correctException() {
        // SQLSTATE 28P01 is the state this driver treats as an authentication failure.
        SQLException e = new SQLException("", "28P01");
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_wrongSQLException() {
        SQLException e = new SQLException("", "28P02");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_runtimeException() {
        // Non-SQL exceptions are never treated as authentication errors.
        RuntimeException e = new RuntimeException("asdf");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_constructUrl() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", "dev");
        // Fixed argument order: JUnit assertEquals takes (expected, actual).
        assertEquals("jdbc:redshift://test-endpoint:1234/dev", url);
    }

    @Test
    public void test_constructUrlNullPort() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", null, "dev");
        assertEquals("jdbc:redshift://test-endpoint/dev", url);
    }

    @Test
    public void test_constructUrlNullDatabase() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", null);
        assertEquals("jdbc:redshift://test-endpoint:1234", url);
    }

    @Test
    public void test_getDefaultDriverClass() {
        // Without the system-property override, the driver falls back to its built-in default.
        System.clearProperty("drivers.redshift.realDriverClass");
        AWSSecretsManagerRedshiftDriver sut2 = new AWSSecretsManagerRedshiftDriver(cache);
        assertEquals(sut2.getDefaultDriverClass(), getFieldFrom(sut2, "realDriverClass"));
    }
}
| 6,143 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerPostgreSQLDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.util.TestClass;
/**
* Tests for the PostgreSQL Driver.
*/
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("com.amazonaws.secretsmanager.sql.AWSSecretsManagerPostgreSQLDriver")
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerPostgreSQLDriverTest extends TestClass {

    private AWSSecretsManagerPostgreSQLDriver sut;

    @Mock
    private SecretCache cache;

    @Before
    public void setup() {
        // Route the wrapped driver to the in-repo test double before building the SUT.
        System.setProperty("drivers.postgresql.realDriverClass", "com.amazonaws.secretsmanager.sql.DummyDriver");
        MockitoAnnotations.initMocks(this);
        try {
            sut = new AWSSecretsManagerPostgreSQLDriver(cache);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void test_getPropertySubprefix() {
        assertEquals("postgresql", sut.getPropertySubprefix());
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsTrue_correctException() {
        // SQLSTATE 28P01 is the state this driver treats as an authentication failure.
        SQLException e = new SQLException("", "28P01");
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_wrongSQLException() {
        SQLException e = new SQLException("", "28P02");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_runtimeException() {
        // Non-SQL exceptions are never treated as authentication errors.
        RuntimeException e = new RuntimeException("asdf");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_constructUrl() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", "dev");
        // Fixed argument order: JUnit assertEquals takes (expected, actual).
        assertEquals("jdbc:postgresql://test-endpoint:1234/dev", url);
    }

    @Test
    public void test_constructUrlNullPort() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", null, "dev");
        assertEquals("jdbc:postgresql://test-endpoint/dev", url);
    }

    @Test
    public void test_constructUrlNullDatabase() {
        // Note: the PostgreSQL URL keeps a trailing slash when the database is absent.
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", null);
        assertEquals("jdbc:postgresql://test-endpoint:1234/", url);
    }

    @Test
    public void test_getDefaultDriverClass() {
        // Without the system-property override, the driver falls back to its built-in default.
        System.clearProperty("drivers.postgresql.realDriverClass");
        AWSSecretsManagerPostgreSQLDriver sut2 = new AWSSecretsManagerPostgreSQLDriver(cache);
        assertEquals(sut2.getDefaultDriverClass(), getFieldFrom(sut2, "realDriverClass"));
    }
}
| 6,144 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerMySQLDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.util.TestClass;
/**
* Tests for the MySQL Driver.
*/
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("com.amazonaws.secretsmanager.sql.AWSSecretsManagerMySQLDriver")
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerMySQLDriverTest extends TestClass {

    private AWSSecretsManagerMySQLDriver sut;

    @Mock
    private SecretCache cache;

    @Before
    public void setup() {
        // Route the wrapped driver to the in-repo test double before building the SUT.
        System.setProperty("drivers.mysql.realDriverClass", "com.amazonaws.secretsmanager.sql.DummyDriver");
        MockitoAnnotations.initMocks(this);
        try {
            sut = new AWSSecretsManagerMySQLDriver(cache);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void test_getPropertySubprefix() {
        assertEquals("mysql", sut.getPropertySubprefix());
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsTrue_correctException() {
        // Vendor error code 1045 is the code this driver treats as an authentication failure.
        SQLException e = new SQLException("", "", 1045);
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_wrongSQLException() {
        SQLException e = new SQLException("", "", 1046);
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_runtimeException() {
        // Non-SQL exceptions are never treated as authentication errors.
        RuntimeException e = new RuntimeException("asdf");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_constructUrl() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", "dev");
        // Fixed argument order: JUnit assertEquals takes (expected, actual).
        assertEquals("jdbc:mysql://test-endpoint:1234/dev", url);
    }

    @Test
    public void test_constructUrlNullPort() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", null, "dev");
        assertEquals("jdbc:mysql://test-endpoint/dev", url);
    }

    @Test
    public void test_constructUrlNullDatabase() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", null);
        assertEquals("jdbc:mysql://test-endpoint:1234", url);
    }

    @Test
    public void test_getDefaultDriverClass() {
        // Without the system-property override, the driver falls back to its built-in default.
        System.clearProperty("drivers.mysql.realDriverClass");
        AWSSecretsManagerMySQLDriver sut2 = new AWSSecretsManagerMySQLDriver(cache);
        assertEquals(sut2.getDefaultDriverClass(), getFieldFrom(sut2, "realDriverClass"));
    }
}
| 6,145 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerMariaDBDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.util.TestClass;
/**
* Tests for the MariaDB Driver.
*/
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("com.amazonaws.secretsmanager.sql.AWSSecretsManagerMariaDBDriver")
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerMariaDBDriverTest extends TestClass {

    private AWSSecretsManagerMariaDBDriver sut;

    @Mock
    private SecretCache cache;

    @Before
    public void setup() {
        // Route the wrapped driver to the in-repo test double before building the SUT.
        System.setProperty("drivers.mariadb.realDriverClass", "com.amazonaws.secretsmanager.sql.DummyDriver");
        MockitoAnnotations.initMocks(this);
        try {
            sut = new AWSSecretsManagerMariaDBDriver(cache);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void test_getPropertySubprefix() {
        assertEquals("mariadb", sut.getPropertySubprefix());
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsTrue_correctException() {
        // Vendor error code 1045 is the code this driver treats as an authentication failure.
        SQLException e = new SQLException("", "", 1045);
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_wrongSQLException() {
        SQLException e = new SQLException("", "", 1046);
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_runtimeException() {
        // Non-SQL exceptions are never treated as authentication errors.
        RuntimeException e = new RuntimeException("asdf");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_constructUrl() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", "dev");
        // Fixed argument order: JUnit assertEquals takes (expected, actual).
        assertEquals("jdbc:mariadb://test-endpoint:1234/dev", url);
    }

    @Test
    public void test_constructUrlNullPort() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", null, "dev");
        assertEquals("jdbc:mariadb://test-endpoint/dev", url);
    }

    @Test
    public void test_constructUrlNullDatabase() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", null);
        assertEquals("jdbc:mariadb://test-endpoint:1234", url);
    }

    @Test
    public void test_getDefaultDriverClass() {
        // Without the system-property override, the driver falls back to its built-in default.
        System.clearProperty("drivers.mariadb.realDriverClass");
        AWSSecretsManagerMariaDBDriver sut2 = new AWSSecretsManagerMariaDBDriver(cache);
        assertEquals(sut2.getDefaultDriverClass(), getFieldFrom(sut2, "realDriverClass"));
    }
}
| 6,146 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/DummyDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import java.sql.Driver;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.DriverPropertyInfo;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.Properties;
import java.util.logging.Logger;
/**
* Dummy driver.
*/
/**
 * Test double implementing {@link Driver}. Registers a singleton with
 * {@link DriverManager} at class-load time and records every call in static
 * counters so tests can assert delegation behavior. Call {@link #reset()}
 * between tests to clear the recorded state.
 */
public class DummyDriver implements Driver {

    /** Singleton registered with the DriverManager by the static initializer. */
    public static DummyDriver instance;

    static {
        instance = new DummyDriver();
        try {
            DriverManager.registerDriver(instance);
        } catch (SQLException e) {
            throw new RuntimeException("Driver could not be registered.", e);
        }
    }

    public static int acceptsURLCallCount;

    /** Accepts only the literal URL "jdbc:expectedUrl". */
    @Override
    public boolean acceptsURL(String url) throws SQLException {
        acceptsURLCallCount++;
        return "jdbc:expectedUrl".equals(url);
    }

    /** Sentinel usernames that make {@link #connect} fail in a specific way. */
    public static final String SQL_ERROR_USERNAME = "SQL_ERROR_USERNAME";
    public static final String RUNTIME_ERROR_USERNAME = "RUNTIME_ERROR_USERNAME";

    public static int connectCallCount;

    /**
     * Returns null (no real connection); throws SQLException or RuntimeException
     * when the "user" property carries the corresponding sentinel value.
     */
    @Override
    public Connection connect(String url, Properties info) throws SQLException {
        connectCallCount++;
        if (info != null && SQL_ERROR_USERNAME.equals(info.getProperty("user"))) {
            throw new SQLException("Invalid SQL Exception!");
        } else if (info != null && RUNTIME_ERROR_USERNAME.equals(info.getProperty("user"))) {
            throw new RuntimeException("Invalid Runtime Exception!");
        }
        return null;
    }

    public static int getMajorVersionCallCount;
    public static final int GET_MAJOR_VERSION_RETURN_VALUE = 87;

    @Override
    public int getMajorVersion() {
        getMajorVersionCallCount++;
        return GET_MAJOR_VERSION_RETURN_VALUE;
    }

    public static int getMinorVersionCallCount;
    public static final int GET_MINOR_VERSION_RETURN_VALUE = 75;

    @Override
    public int getMinorVersion() {
        getMinorVersionCallCount++;
        return GET_MINOR_VERSION_RETURN_VALUE;
    }

    public static int getParentLoggerCallCount;

    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        getParentLoggerCallCount++;
        return null;
    }

    public static int getPropertyInfoCallCount;
    public static String getPropertyInfoParam1;
    public static Properties getPropertyInfoParam2;

    /** Records the arguments it was invoked with so tests can inspect delegation. */
    @Override
    public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
        getPropertyInfoCallCount++;
        getPropertyInfoParam1 = url;
        getPropertyInfoParam2 = info;
        return null;
    }

    public static int jdbcCompliantCallCount;

    @Override
    public boolean jdbcCompliant() {
        jdbcCompliantCallCount++;
        return true;
    }

    /** Clears all recorded call counts and captured arguments between tests. */
    public static void reset() {
        acceptsURLCallCount = 0;
        connectCallCount = 0;
        getMajorVersionCallCount = 0;
        getMinorVersionCallCount = 0;
        getParentLoggerCallCount = 0;
        // Bug fix: getPropertyInfoCallCount was previously never reset, so the
        // count leaked between tests that call getPropertyInfo.
        getPropertyInfoCallCount = 0;
        jdbcCompliantCallCount = 0;
        getPropertyInfoParam1 = null;
        getPropertyInfoParam2 = null;
    }
}
| 6,147 |
0 | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/test/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerMSSQLServerDriverTest.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.SQLException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.util.TestClass;
/**
* Tests for the MSSQL Driver.
*/
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("com.amazonaws.secretsmanager.sql.AWSSecretsManagerMSSQLServerDriver")
@PowerMockIgnore("jdk.internal.reflect.*")
public class AWSSecretsManagerMSSQLServerDriverTest extends TestClass {

    private AWSSecretsManagerMSSQLServerDriver sut;

    @Mock
    private SecretCache cache;

    @Before
    public void setup() {
        // Route the wrapped driver to the in-repo test double before building the SUT.
        System.setProperty("drivers.sqlserver.realDriverClass", "com.amazonaws.secretsmanager.sql.DummyDriver");
        MockitoAnnotations.initMocks(this);
        try {
            sut = new AWSSecretsManagerMSSQLServerDriver(cache);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void test_getPropertySubprefix() {
        assertEquals("sqlserver", sut.getPropertySubprefix());
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsTrue_correctException() {
        // Vendor error code 18456 is the code this driver treats as an authentication failure.
        SQLException e = new SQLException("", "", 18456);
        assertTrue(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_wrongSQLException() {
        SQLException e = new SQLException("", "", 18457);
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_isExceptionDueToAuthenticationError_returnsFalse_runtimeException() {
        // Non-SQL exceptions are never treated as authentication errors.
        RuntimeException e = new RuntimeException("asdf");
        assertFalse(sut.isExceptionDueToAuthenticationError(e));
    }

    @Test
    public void test_constructUrl() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", "dev");
        // Fixed argument order: JUnit assertEquals takes (expected, actual).
        assertEquals("jdbc:sqlserver://test-endpoint:1234;databaseName=dev;", url);
    }

    @Test
    public void test_constructUrlNullPort() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", null, "dev");
        assertEquals("jdbc:sqlserver://test-endpoint;databaseName=dev;", url);
    }

    @Test
    public void test_constructUrlNullDatabase() {
        String url = sut.constructUrlFromEndpointPortDatabase("test-endpoint", "1234", null);
        assertEquals("jdbc:sqlserver://test-endpoint:1234", url);
    }

    @Test
    public void test_getDefaultDriverClass() {
        // Without the system-property override, the driver falls back to its built-in default.
        System.clearProperty("drivers.sqlserver.realDriverClass");
        AWSSecretsManagerMSSQLServerDriver sut2 = new AWSSecretsManagerMSSQLServerDriver(cache);
        assertEquals(sut2.getDefaultDriverClass(), getFieldFrom(sut2, "realDriverClass"));
    }
}
| 6,148 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/util/JDBCSecretCacheBuilderProvider.java | package com.amazonaws.secretsmanager.util;
import java.net.URI;
import com.amazonaws.secretsmanager.sql.AWSSecretsManagerDriver;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
* <p>
* A class for providing JDBC driver the secrets cache builder.
*
* Checks the config file and environment variables for overrides to the default
* region and applies those changes to the provided secret cache builder.
* </p>
*/
/**
 * <p>
 * Supplies the {@code SecretsManagerClientBuilder} used by the JDBC driver's
 * secret cache.
 *
 * Endpoint/region resolution order: explicit VPC endpoint settings from the
 * config file, then the AWS_SECRET_JDBC_REGION environment variable, then the
 * config-file region property, and finally the SDK's default region provider
 * chain.
 * </p>
 */
public class JDBCSecretCacheBuilderProvider {

    /**
     * Configuration properties to override the PrivateLink DNS URL and region for Secrets Manager.
     * Both must be present for the endpoint override to take effect.
     */
    static final String PROPERTY_VPC_ENDPOINT_URL = "vpcEndpointUrl";
    static final String PROPERTY_VPC_ENDPOINT_REGION = "vpcEndpointRegion";

    /**
     * Configuration property and environment variable to override the default region.
     */
    static final String PROPERTY_REGION = "region";
    static final String REGION_ENVIRONMENT_VARIABLE = "AWS_SECRET_JDBC_REGION";

    // Source of the property overrides, captured at construction time.
    private Config configFile;

    public JDBCSecretCacheBuilderProvider() {
        this(Config.loadMainConfig());
    }

    public JDBCSecretCacheBuilderProvider(Config config) {
        configFile = config;
    }

    /**
     * Provides the secrets cache builder.
     *
     * 1) If a PrivateLink DNS endpoint URL and region are both given in the Config, they configure the endpoint.
     * 2) Otherwise, the AWS_SECRET_JDBC_REGION environment variable, if set, configures the region.
     * 3) Otherwise, the region property in the provided Config, if set, configures the region.
     * 4) Finally, if none of these are found, the SDK's default region provider chain is used.
     *
     * @return the configured {@code SecretsManagerClientBuilder}.
     */
    public SecretsManagerClientBuilder build() {
        SecretsManagerClientBuilder builder = SecretsManagerClient.builder();

        // Retrieve overrides from the config file and the environment.
        String vpcEndpointUrl = configFile.getStringPropertyWithDefault(AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_URL, null);
        String vpcEndpointRegion = configFile.getStringPropertyWithDefault(AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_VPC_ENDPOINT_REGION, null);
        String envRegion = System.getenv(REGION_ENVIRONMENT_VARIABLE);
        String configRegion = configFile.getStringPropertyWithDefault(AWSSecretsManagerDriver.PROPERTY_PREFIX + "." + PROPERTY_REGION, null);

        // Apply the first applicable override; the VPC endpoint wins and needs both URL and region.
        if (StringUtils.isNotBlank(vpcEndpointUrl) && StringUtils.isNotBlank(vpcEndpointRegion)) {
            builder.endpointOverride(URI.create(vpcEndpointUrl)).region(Region.of(vpcEndpointRegion));
        } else if (StringUtils.isNotBlank(envRegion)) {
            builder.region(Region.of(envRegion));
        } else if (StringUtils.isNotBlank(configRegion)) {
            builder.region(Region.of(configRegion));
        }

        return builder;
    }
}
| 6,149 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/util/PropertyException.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.util;
/**
* <p>
* Thrown when there is an issue related to a property set in the configuration for this library.
* </p>
*/
public class PropertyException extends RuntimeException {

    // RuntimeException is Serializable; declare an explicit serialVersionUID so the
    // serialized form stays stable across compiler/JVM versions.
    private static final long serialVersionUID = 1L;

    /**
     * Public constructor.
     *
     * @param message The reason for this exception.
     * @param cause The exception that caused this one.
     */
    public PropertyException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Public constructor.
     *
     * @param message The reason for this exception.
     */
    public PropertyException(String message) {
        super(message);
    }
}
| 6,150 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/util/Config.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.util;
import java.io.IOException;
import java.io.InputStream;
import java.util.Enumeration;
import java.util.NoSuchElementException;
import java.util.Properties;
import lombok.EqualsAndHashCode;
/**
* <p>
* A class for accessing configuration information from a properties file or from the System properties. Properties
* defined in the file will override the properties set in the System properties. The properties file should be
* located somewhere on the class path for the class loader used to load this class.
* </p>
*
* <p>
* The default file that properties will be fetched from is referred to by <code>Config.CONFIG_FILE_NAME</code>.
* </p>
*/
@EqualsAndHashCode
public final class Config {
/**
* The name of the properties file used for configuration; "secretsmanager.properties".
*/
public static final String CONFIG_FILE_NAME = "secretsmanager.properties";
private Properties config;
private String prefix;
/**
* Private constructor to load the properties.
*
* @param prefix The prefix of the properties used by the config that
* this subconfig was extracted from.
* @param config The properties that this config should contain.
*/
private Config(String prefix, Properties config) {
this.config = config;
this.prefix = prefix;
}
/**
* Loads the configuration properties from the specified config file. Defaults will be the System properties if the
* file is not present.
*
* @param resourceName The name of the config file to load from.
*
* @return Properties The properties that this object should serve.
*/
private static Properties loadPropertiesFromConfigFile(String resourceName) {
Properties newConfig = new Properties(System.getProperties());
InputStream configFile;
try {
configFile = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName);
if(configFile != null) {
newConfig.load(configFile);
configFile.close();
}
} catch (IOException e) {
throw new PropertyException("An error occured when loading the property file, " + CONFIG_FILE_NAME, e);
}
return newConfig;
}
/**
* Loads a configuration from a specific configuration file. Will use the System properties as defaults.
*
* @param resourceName The name of the config file to load from.
*
* @return Config A new <code>Config</code> with the properties from the
* file and the system properties as defaults.
*/
public static Config loadConfigFrom(String resourceName) {
return new Config(null, Config.loadPropertiesFromConfigFile(resourceName));
}
/**
* Loads a configuration from <code>CONFIG_FILE_NAME</code>. Will use the System properties as defaults.
*
* @return Config A new <code>Config</code> with the properties from the
* <code>CONFIG_FILE_NAME</code> file and the system
* properties as defaults.
*/
public static Config loadMainConfig() {
return loadConfigFrom(CONFIG_FILE_NAME);
}
/**
* Consumes a full property name and checks if it lies beneath the given subprefix.
*
* @param propertyName The full property name to check.
* @param subprefix The subprefix to check for membership in.
*
* @return boolean Whether or not the <code>propertyName</code> falls under
* the <code>subprefix</code>.
*/
private boolean isSubproperty(String propertyName, String subprefix) {
return propertyName.indexOf(subprefix + ".") == 0;
}
/**
* Get the subproperty from the property by removing the subprefix.
*
* @param fullPropertyName The property name to remove the subprefix from.
* @param subprefix The subprefix to remove.
*
* @return String The property name with the subprefix removed from the
* beginning.
*/
private String getSubproperty(String fullPropertyName, String subprefix) {
return fullPropertyName.substring(subprefix.length() + 1);
}
/**
* Extracts all of the properties for a given subprefix into its own <code>Config</code> object. The property
* names will be changed to no longer have the subprefix.
*
* @param subprefix The subprefix to get all of the properties for.
*
* @return Config Configuration properties for the subprefix
*/
@SuppressWarnings("unchecked")
public Config getSubconfig(String subprefix) {
Enumeration<String> propertyNames = (Enumeration<String>) config.propertyNames();
Properties subconfig = null;
while (propertyNames.hasMoreElements()) {
String name = propertyNames.nextElement();
if (isSubproperty(name, subprefix)) {
if (subconfig == null) {
subconfig = new Properties();
}
String subpropertyName = getSubproperty(name, subprefix);
subconfig.setProperty(subpropertyName, config.getProperty(name));
}
}
if (subconfig == null) {
return null;
} else if (prefix != null) {
return new Config(prefix + "." + subprefix, subconfig);
} else {
return new Config(subprefix, subconfig);
}
}
/**
* Extends a property name to be the full version written in the configuration file. This full name is not
* necessarily the name that the property is indexed with in this <code>Config</code> object.
*
* @param propertyName The property name to extend.
*
* @return String The full property name as written in the configuration
* file.
*/
public String fullPropertyName(String propertyName) {
if (prefix != null) {
return prefix + "." + propertyName;
} else {
return propertyName;
}
}
/**
* Returns a <code>String</code> property or a default value if the property is not set.
*
* @param propertyName The name of the property to retrieve.
* @param defaultValue The default value to use.
*
* @return String The <code>String</code> property or a default value if
* the property is not set.
*/
public String getStringPropertyWithDefault(String propertyName, String defaultValue) {
String propertyValue = config.getProperty(propertyName);
if (propertyValue == null) {
return defaultValue;
} else {
return propertyValue;
}
}
/**
* Returns a <code>int</code> property or a default value if the property is not set.
*
* @param propertyName The name of the property to retrieve.
* @param defaultValue The default value to use.
*
* @return int The <code>int</code> property or a default value if
* the property is not set.
*
* @throws PropertyException If the property value is not a decimal <code>int</code>.
*/
public int getIntPropertyWithDefault(String propertyName, int defaultValue) {
String propertyValue = config.getProperty(propertyName);
if (propertyValue == null) {
return defaultValue;
} else {
try {
return Integer.parseInt(propertyValue);
} catch (NumberFormatException e) {
throw new PropertyException(fullPropertyName(propertyName) + " must be of type int. Please check "
+ Config.CONFIG_FILE_NAME + " or your system properties for typos.", e);
}
}
}
/**
* Returns a <code>long</code> property or a default value if the property is not set.
*
* @param propertyName The name of the property to retrieve.
* @param defaultValue The default value to use.
*
* @return long The <code>long</code> property or a default value if
* the property is not set.
*
* @throws PropertyException If the property value is not a decimal
* <code>long</code>.
*/
public long getLongPropertyWithDefault(String propertyName, long defaultValue) {
String propertyValue = config.getProperty(propertyName);
if (propertyValue == null) {
return defaultValue;
} else {
try {
return Long.parseLong(propertyValue);
} catch (NumberFormatException e) {
throw new PropertyException(fullPropertyName(propertyName) + " must be of type long. Please check "
+ Config.CONFIG_FILE_NAME + " or your system properties for typos.", e);
}
}
}
/**
* Returns a <code>Class</code> property or a default value if the property is not set.
*
* @param propertyName The name of the property to retrieve.
* @param defaultValue The default value to use.
*
* @return Class The <code>Class</code> property or a default value if
* the property is not set.
*
* @throws PropertyException If the class name does not exist in this class loader.
*/
public Class<?> getClassPropertyWithDefault(String propertyName, Class<?> defaultValue) {
String propertyValue = config.getProperty(propertyName);
if (propertyValue == null) {
return defaultValue;
} else {
try {
return Class.forName(propertyValue);
} catch (ClassNotFoundException e) {
throw new PropertyException(fullPropertyName(propertyName) + " must be a valid class name. Please check"
+ " " + Config.CONFIG_FILE_NAME + " or your system properties for typos.",
e);
}
}
}
/**
* Throws a <code>NoSuchElementException</code> if a value is not set for the given property name.
*
* @param propertyName The property to check.
*
* @throws NoSuchElementException If the property is not set.
*/
private void throwIfPropertyIsNotSet(String propertyName) {
if (config.getProperty(propertyName) == null) {
throw new NoSuchElementException(fullPropertyName(propertyName)
+ " property must be specified either in " + Config.CONFIG_FILE_NAME
+ " or in the system properties.");
}
}
/**
* Returns a <code>String</code> property or throws a <code>NoSuchElementException</code> if the property is not
* set.
*
* @param propertyName The name of the property to retrieve.
*
* @return String The <code>String</code> property or a default value if
* the property is not set.
*
* @throws NoSuchElementException If the property is not set.
*/
public String getRequiredStringProperty(String propertyName) {
throwIfPropertyIsNotSet(propertyName);
String propertyValue = config.getProperty(propertyName);
return propertyValue;
}
/**
* Returns a <code>int</code> property or throws a <code>NoSuchElementException</code> if the property is not set.
*
* @param propertyName The name of the property to retrieve.
*
* @return int The <code>int</code> property or a default value if
* the property is not set.
*
* @throws PropertyException If the property value is not a decimal <code>int</code>.
* @throws NoSuchElementException If the property is not set.
*/
public int getRequiredIntProperty(String propertyName) {
throwIfPropertyIsNotSet(propertyName);
return getIntPropertyWithDefault(propertyName, 0);
}
/**
* Returns a <code>long</code> property or throws a <code>NoSuchElementException</code> if the property is not set.
*
* @param propertyName The name of the property to retrieve.
*
* @return long The <code>long</code> property or a default value if
* the property is not set.
*
* @throws PropertyException If the property value is not a decimal
* <code>long</code>.
* @throws NoSuchElementException If the property is not set.
*/
public long getRequiredLongProperty(String propertyName) {
throwIfPropertyIsNotSet(propertyName);
return getLongPropertyWithDefault(propertyName, 0);
}
/**
* Returns a <code>Class</code> property or throws a <code>NoSuchElementException</code> if the property is not set.
*
* @param propertyName The name of the property to retrieve.
*
* @return Class The <code>Class</code> property or a default value if
* the property is not set.
*
* @throws PropertyException If the class name does not exist in this class loader.
* @throws NoSuchElementException If the property is not set.
*/
public Class<?> getRequiredClassProperty(String propertyName) {
throwIfPropertyIsNotSet(propertyName);
return getClassPropertyWithDefault(propertyName, null);
}
}
| 6,151 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/util/SQLExceptionUtils.java | package com.amazonaws.secretsmanager.util;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
public class SQLExceptionUtils {

    /**
     * Checks the thrown exception and its entire cause chain and returns true if
     * a SQLException with a matching vendor error code is found.
     *
     * <p>Cycle-safe: already-visited throwables are tracked by identity, so a
     * self-referential cause chain cannot loop forever. (Throwable does not
     * override equals, so identity matches the original List.contains behavior,
     * but Set membership is O(1) instead of O(n).)</p>
     *
     * @param t The throwable to check; may be null.
     * @param errorCode The error code to check for.
     * @return True if the exception or any cause is a SQLException whose
     *         getErrorCode matches the error code. Otherwise, false.
     */
    public static boolean unwrapAndCheckForCode(Throwable t, int errorCode) {
        final Set<Throwable> seen = Collections.newSetFromMap(new IdentityHashMap<>());
        // seen.add returns false once we revisit a throwable, terminating cycles.
        while (t != null && seen.add(t)) {
            if (t instanceof SQLException && ((SQLException) t).getErrorCode() == errorCode) {
                return true;
            }
            t = t.getCause();
        }
        return false;
    }

    /**
     * Hide constructor for static class.
     */
    private SQLExceptionUtils() { }
}
| 6,152 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/util/package-info.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* This package contains utility classes that support the rest of the codebase.
*/
package com.amazonaws.secretsmanager.util;
| 6,153 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerMySQLDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import com.amazonaws.secretsmanager.util.SQLExceptionUtils;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
* <p>
* Provides support for accessing MySQL databases using credentials stored within AWS Secrets Manager.
* </p>
*
* <p>
* This will also work for MariaDB, as the error codes are the same.
* </p>
*
* <p>
* Configuration properties are specified using the "mysql" subprefix (e.g drivers.mysql.realDriverClass).
* </p>
*/
public final class AWSSecretsManagerMySQLDriver extends AWSSecretsManagerDriver {

    /**
     * The MySQL server error code reported when a login is rejected because of a bad password.
     *
     * See <a href="https://dev.mysql.com/doc/refman/5.5/en/error-messages-server.html">MySQL error codes</a>.
     */
    public static final int ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE = 1045;

    /**
     * Set to mysql.
     */
    public static final String SUBPREFIX = "mysql";

    static {
        // Constructing an instance registers this driver with DriverManager.
        AWSSecretsManagerDriver.register(new AWSSecretsManagerMySQLDriver());
    }

    /**
     * Constructs the driver with properties from the properties file (system properties
     * as defaults) and a secret cache built with default options.
     */
    public AWSSecretsManagerMySQLDriver() {
        super();
    }

    /**
     * Constructs the driver with properties from the properties file (system properties
     * as defaults), using the supplied secret cache.
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    public AWSSecretsManagerMySQLDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Constructs the driver with properties from the properties file (system properties
     * as defaults); the secret cache is instantiated from the given client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerMySQLDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Constructs the driver with properties from the properties file (system properties
     * as defaults); the secret cache is instantiated from the given client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerMySQLDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Constructs the driver with properties from the properties file (system properties
     * as defaults); the secret cache is instantiated from the given cache configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerMySQLDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        // MariaDB uses the same error code, so this driver works for it as well.
        return SQLExceptionUtils.unwrapAndCheckForCode(e, ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE);
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        // Port and database name are optional; append each only when present.
        StringBuilder url = new StringBuilder("jdbc:mysql://").append(endpoint);
        if (StringUtils.isNotBlank(port)) {
            url.append(':').append(port);
        }
        if (StringUtils.isNotBlank(dbname)) {
            url.append('/').append(dbname);
        }
        return url.toString();
    }

    @Override
    public String getDefaultDriverClass() {
        // Prefer the Connector/J 8.x driver class when it is on the classpath
        // (loaded without initialization, just to probe); otherwise fall back
        // to the legacy 5.x class name.
        String modernDriver = "com.mysql.cj.jdbc.Driver";
        try {
            Class.forName(modernDriver, false, this.getClass().getClassLoader());
            return modernDriver;
        } catch (ClassNotFoundException e) {
            return "com.mysql.jdbc.Driver";
        }
    }
}
| 6,154 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import java.io.IOException;
import java.sql.Connection;
import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.DriverPropertyInfo;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.Enumeration;
import java.util.Properties;
import java.util.logging.Logger;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import com.amazonaws.secretsmanager.util.Config;
import com.amazonaws.secretsmanager.util.JDBCSecretCacheBuilderProvider;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
* <p>
* Provides support for accessing SQL databases using credentials stored within AWS Secrets Manager. If this
* functionality is desired, then a subclass of this class should be specified as the JDBC driver for an application.
* </p>
*
* <p>
* The driver to propagate <code>connect</code> requests to should also be specified in the configuration. Doing this
* will cause the real driver to be registered once an instance of this driver is made (which will be when this driver
* is registered).
* </p>
*
* <p>
* This base class registers itself with the <code>java.sql.DriverManager</code> when its constructor is called. That
* means a subclass only needs to make a new instance of itself in its static block to register.
* </p>
*
* <p>
* This does not support including the user (secret ID) and password in the jdbc url, as JDBC url formats are database
* specific. If this functionality is desired, it must be implemented in a subclass.
* </p>
*
* <p>
* Ignores the password field, drawing a secret ID from the user field. The secret referred to by this field is
* expected to be in the standard JSON format used by the rotation lambdas provided by Secrets Manager:
* </p>
*
* <pre>
* {@code
* {
* "username": "xxxx",
* "password": "xxxx",
* ...
* }
* }
* </pre>
*
* <p>
* Here is a list of the configuration properties. The subprefix is an implementation specific String used to keep
* the properties for different drivers separate. For example, the MySQL driver wrapper might use mysql as its
* subprefix, making the full property name for the realDriverClass for the MySQL driver wrapper
* drivers.mysql.realDriverClass (all Driver properties will be prefixed with "drivers."). This String is defined by
* the method <code>getPropertySubprefix</code>.
* </p>
*
* <ul>
* <li>drivers.<i>subprefix</i>.realDriverClass - (optional) The class name of the driver to propagate calls to.
* If not specified, default for <i>subprefix</i> is used</li>
* </ul>
*/
public abstract class AWSSecretsManagerDriver implements Driver {
/**
* "jdbc-secretsmanager", so the JDBC URL should start with "jdbc-secretsmanager" instead of just "jdbc".
*/
public static final String SCHEME = "jdbc-secretsmanager";
/**
* Maximum number of times to retry connecting to DB on auth failures
*/
public static final int MAX_RETRY = 5;
/**
* "drivers", so all configuration properties start with "drivers.".
*/
public static final String PROPERTY_PREFIX = "drivers";
/**
* Message to return on the RuntimeException when secret string is invalid json
*/
public static final String INVALID_SECRET_STRING_JSON = "Could not parse SecretString JSON";
private SecretCache secretCache;
private String realDriverClass;
private Config config;
private ObjectMapper mapper = new ObjectMapper();
    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with default options.
     */
    protected AWSSecretsManagerDriver() {
        this(new JDBCSecretCacheBuilderProvider().build());
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Sets the secret cache to the cache that was passed in.
     *
     * <p>All other constructors delegate to this one. Note that it calls the overridable
     * methods {@code setProperties()} (which invokes the subclass's
     * {@code getPropertySubprefix()}/{@code getDefaultDriverClass()}) and {@code register(this)}
     * — hence the suppressed FindBugs warning. Subclass overrides must not depend on
     * subclass constructor state.</p>
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    @SuppressFBWarnings("MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR")
    protected AWSSecretsManagerDriver(SecretCache cache) {
        this.secretCache = cache;
        setProperties();
        // Registering here means merely constructing a subclass instance (as its
        // static initializer does) exposes the driver through DriverManager.
        AWSSecretsManagerDriver.register(this);
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with the passed in client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    protected AWSSecretsManagerDriver(SecretsManagerClientBuilder builder) {
        this(new SecretCache(builder));
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with the provided AWS Secrets Manager client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    protected AWSSecretsManagerDriver(SecretsManagerClient client) {
        this(new SecretCache(client));
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with the provided cache configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    protected AWSSecretsManagerDriver(SecretCacheConfiguration cacheConfig) {
        this(new SecretCache(cacheConfig));
    }
    /**
     * Sets general configuration properties that are unrelated to the API client.
     */
    private void setProperties() {
        // Driver-specific settings live under "drivers.<subprefix>." in the config file.
        this.config = Config.loadMainConfig().getSubconfig(PROPERTY_PREFIX + "." + getPropertySubprefix());

        if (this.config == null) {
            // No configuration section for this driver: fall back to the default real driver.
            this.realDriverClass = getDefaultDriverClass();
            return;
        }

        this.realDriverClass = this.config.getStringPropertyWithDefault("realDriverClass", getDefaultDriverClass());
    }

    /**
     * Loads the real driver.
     *
     * @throws IllegalStateException When there is no class with the name
     *                               <code>realDriverClass</code>
     */
    private void loadRealDriver() {
        try {
            // Loading the class triggers its static initializer, which by JDBC
            // convention registers the driver with DriverManager.
            Class.forName(this.realDriverClass);
        } catch (ClassNotFoundException e) {
            throw new IllegalStateException("Could not load real driver with name, \"" + this.realDriverClass + "\".", e);
        }
    }

    /**
     * Called when the driver is deregistered to cleanup resources.
     */
    private static void shutdown(AWSSecretsManagerDriver driver) {
        driver.secretCache.close();
    }

    /**
     * Registers a driver along with the <code>DriverAction</code> implementation.
     *
     * @param driver The driver to register.
     *
     * @throws RuntimeException If the driver could not be registered.
     */
    protected static void register(AWSSecretsManagerDriver driver) {
        try {
            // The lambda is the DriverAction DriverManager invokes on deregistration.
            DriverManager.registerDriver(driver, () -> shutdown(driver));
        } catch (SQLException e) {
            throw new RuntimeException("Driver could not be registered.", e);
        }
    }
/**
* Gets the "subprefix" used for configuration properties for this driver. For example, if this method returns the
* String, "mysql", then the real driver that this will forward requests to would be set to
* drivers.mysql.realDriverClass in the properties file or in the system properties.
*
* @return String The subprefix to use for configuration properties.
*/
public abstract String getPropertySubprefix();
/**
* Replaces <code>SCHEME</code> in a jdbc url with "jdbc" in order to pass the url to the real driver.
*
* @param jdbcUrl The jdbc url with <code>SCHEME</code> as the scheme.
*
* @return String The jdbc url with the scheme changed.
*
* @throws IllegalArgumentException When the url does not start with <code>SCHEME</code>.
*/
private String unwrapUrl(String jdbcUrl) {
if (!jdbcUrl.startsWith(SCHEME)) {
throw new IllegalArgumentException("JDBC URL is malformed. Must use scheme, \"" + SCHEME + "\".");
}
return jdbcUrl.replaceFirst(SCHEME, "jdbc");
}
/**
* Returns an instance of the real <code>java.sql.Driver</code> that this should propagate calls to. The real
* driver is specified by the realDriverClass property.
*
* @return Driver The real <code>Driver</code> that calls should be
* propagated to.
*
* @throws IllegalStateException When there is no driver with the name
* <code>realDriverClass</code>
*/
public Driver getWrappedDriver() {
loadRealDriver();
Enumeration<Driver> availableDrivers = DriverManager.getDrivers();
while (availableDrivers.hasMoreElements()) {
Driver driver = availableDrivers.nextElement();
if (driver.getClass().getName().equals(this.realDriverClass)) {
return driver;
}
}
throw new IllegalStateException("No Driver has been registered with name, " + this.realDriverClass
+ ". Please check your system properties or " + Config.CONFIG_FILE_NAME
+ " for typos. Also ensure that the Driver registers itself.");
}
@Override
public boolean acceptsURL(String url) throws SQLException {
if (url == null) {
throw new SQLException("url cannot be null.");
}
if (url.startsWith(SCHEME)) {
// If this is a URL in our SCHEME, call the acceptsURL method of the wrapped driver
return getWrappedDriver().acceptsURL(unwrapUrl(url));
} else if (url.startsWith("jdbc:")) {
// For any other JDBC URL, return false
return false;
} else {
// We accept a secret ID as the URL so if the config is set, and it's not a JDBC URL, return true
return true;
}
}
/**
* Determines whether or not an <code>Exception</code> is due to an authentication failure with the remote
* database. This method is called during <code>connect</code> to decide if authentication needs to be attempted
* again with refreshed credentials. A good way to implement this is to look up the error codes that
 * <code>java.sql.SQLException</code>s will have when an authentication failure occurs. These are database
* specific.
*
* @param exception The <code>Exception</code> to test.
*
* @return boolean Whether or not the <code>Exception</code> indicates that
* the credentials used for authentication are stale.
*/
public abstract boolean isExceptionDueToAuthenticationError(Exception exception);
/**
* Construct a database URL from the endpoint, port and database name. This method is called when the
* <code>connect</code> method is called with a secret ID instead of a URL.
*
* @param endpoint The endpoint retrieved from the secret cache
* @param port The port retrieved from the secret cache
* @param dbname The database name retrieved from the secret cache
*
* @return String The constructed URL based on the endpoint and port
*/
public abstract String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname);
/**
* Get the default real driver class name for this driver.
*
* @return String The default real driver class name
*/
public abstract String getDefaultDriverClass();
/**
* Calls the real driver's <code>connect</code> method using credentials from a secret stored in AWS Secrets
* Manager.
*
* @param unwrappedUrl The jdbc url that the real driver will accept.
* @param info The information to pass along to the real driver. The
* user and password fields will be replaced with the
* credentials retrieved from Secrets Manager.
* @param credentialsSecretId The friendly name or ARN of the secret that stores the
* login credentials.
*
* @return Connection A database connection.
*
* @throws SQLException If there is an error from the driver or underlying
* database.
* @throws InterruptedException If there was an interruption during secret refresh.
*/
private Connection connectWithSecret(String unwrappedUrl, Properties info, String credentialsSecretId)
throws SQLException, InterruptedException {
int retryCount = 0;
while (retryCount++ <= MAX_RETRY) {
String secretString = secretCache.getSecretString(credentialsSecretId);
Properties updatedInfo = new Properties(info);
try {
JsonNode jsonObject = mapper.readTree(secretString);
updatedInfo.setProperty("user", jsonObject.get("username").asText());
updatedInfo.setProperty("password", jsonObject.get("password").asText());
} catch (IOException e) {
// Most likely to occur in the event that the data is not JSON.
// Or the secret's username and/or password fields have been
// removed entirely. Either scenario is most often a user error.
throw new RuntimeException(INVALID_SECRET_STRING_JSON);
}
try {
return getWrappedDriver().connect(unwrappedUrl, updatedInfo);
} catch (Exception e) {
if (isExceptionDueToAuthenticationError(e)) {
boolean refreshSuccess = this.secretCache.refreshNow(credentialsSecretId);
if (!refreshSuccess) {
throw(e);
}
}
else {
throw(e);
}
}
}
// Max retries reached
throw new SQLException("Connect failed to authenticate: reached max connection retries");
}
/**
 * Opens a connection either from a URL in this driver's scheme or from a secret ID.
 * When {@code url} does not start with this driver's scheme it is treated as the ID of a
 * secret whose JSON body supplies host/port/dbname for building the real JDBC url. When
 * the caller supplies a "user" property, that value is interpreted as the ID of the
 * secret holding the login credentials.
 *
 * @param url  A wrapped JDBC url in this driver's scheme, or a secret ID.
 * @param info Connection properties; "user", when present, names the credentials secret.
 *
 * @return Connection A database connection, or null when the url is not accepted.
 *
 * @throws SQLException If there is an error from the driver or underlying database.
 */
@Override
public Connection connect(String url, Properties info) throws SQLException {
    if (!acceptsURL(url)) {
        return null;
    }

    String unwrappedUrl = "";
    if (url.startsWith(SCHEME)) { // If this is a URL in the correct scheme, unwrap it
        unwrappedUrl = unwrapUrl(url);
    } else { // Else, assume this is a secret ID and try to retrieve it
        try {
            String secretString = secretCache.getSecretString(url);
            if (StringUtils.isBlank(secretString)) {
                throw new IllegalArgumentException("URL " + url + " is not a valid URL starting with scheme " +
                        SCHEME + " or a valid retrievable secret ID ");
            }
            JsonNode jsonObject = mapper.readTree(secretString);
            String endpoint = jsonObject.get("host").asText();
            // "port" and "dbname" are optional fields in the secret JSON.
            JsonNode portNode = jsonObject.get("port");
            String port = portNode == null ? null : portNode.asText();
            JsonNode dbnameNode = jsonObject.get("dbname");
            String dbname = dbnameNode == null ? null : dbnameNode.asText();
            unwrappedUrl = constructUrlFromEndpointPortDatabase(endpoint, port, dbname);
        } catch (IOException e) {
            // Most likely to occur in the event that the data is not JSON.
            // Or the secret has been modified and is no longer valid.
            // Either scenario is most often a user error.
            // Attach the original exception as the cause so the root problem is not lost.
            throw new RuntimeException(INVALID_SECRET_STRING_JSON, e);
        }
    }

    if (info != null && info.getProperty("user") != null) {
        String credentialsSecretId = info.getProperty("user");
        try {
            return connectWithSecret(unwrappedUrl, info, credentialsSecretId);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before surfacing the failure as unchecked.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    } else {
        return getWrappedDriver().connect(unwrappedUrl, info);
    }
}
@Override
public int getMajorVersion() {
    // Version information comes from the wrapped real driver.
    return getWrappedDriver().getMajorVersion();
}
@Override
public int getMinorVersion() {
    // Version information comes from the wrapped real driver.
    return getWrappedDriver().getMinorVersion();
}
@Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
    // Delegate logging configuration to the wrapped real driver.
    return getWrappedDriver().getParentLogger();
}
@Override
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
    // Strip this driver's scheme prefix before asking the real driver for its property info.
    return getWrappedDriver().getPropertyInfo(unwrapUrl(url), info);
}
@Override
public boolean jdbcCompliant() {
    // JDBC compliance is whatever the wrapped real driver reports.
    return getWrappedDriver().jdbcCompliant();
}
}
| 6,155 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerPostgreSQLDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import java.sql.SQLException;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
 * <p>
 * Provides support for accessing PostgreSQL databases using credentials stored within AWS Secrets Manager.
 * </p>
 *
 * <p>
 * Configuration properties are specified using the "postgresql" subprefix (e.g drivers.postgresql.realDriverClass).
 * </p>
 */
public final class AWSSecretsManagerPostgreSQLDriver extends AWSSecretsManagerDriver {

    /**
     * The PostgreSQL error code for when a user logs in using an invalid password.
     *
     * See <a href="https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html">PostgreSQL documentation</a>.
     */
    public static final String ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE = "28P01";

    /**
     * Set to postgresql.
     */
    public static final String SUBPREFIX = "postgresql";

    static {
        // Register this driver with the DriverManager when the class is first loaded.
        AWSSecretsManagerDriver.register(new AWSSecretsManagerPostgreSQLDriver());
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with default options.
     */
    public AWSSecretsManagerPostgreSQLDriver() {
        super();
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Uses the passed in SecretCache.
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    public AWSSecretsManagerPostgreSQLDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with the passed in client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerPostgreSQLDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with the provided AWS Secrets Manager client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerPostgreSQLDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Constructs the driver setting the properties from the properties file using system properties as defaults.
     * Instantiates the secret cache with the provided cache configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerPostgreSQLDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        if (e instanceof SQLException) {
            SQLException sqle = (SQLException) e;
            String sqlState = sqle.getSQLState();
            // getSQLState() is documented as possibly returning null; compare from the
            // non-null constant to avoid a NullPointerException.
            return ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE.equals(sqlState);
        }
        return false;
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        String url = "jdbc:postgresql://" + endpoint;
        if (StringUtils.isNotBlank(port)) {
            url += ":" + port;
        }
        // PostgreSQL URLs carry a trailing "/" even when no database name is given.
        url += "/";
        if (StringUtils.isNotBlank(dbname)) {
            url += dbname;
        }
        return url;
    }

    @Override
    public String getDefaultDriverClass() {
        return "org.postgresql.Driver";
    }
}
| 6,156 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerMSSQLServerDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import java.sql.SQLException;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
 * <p>
 * AWS Secrets Manager aware JDBC driver for Microsoft SQL Server. Login credentials are
 * resolved from Secrets Manager rather than being supplied directly.
 * </p>
 *
 * <p>
 * Configuration properties use the "sqlserver" subprefix (e.g drivers.sqlserver.realDriverClass).
 * </p>
 */
public final class AWSSecretsManagerMSSQLServerDriver extends AWSSecretsManagerDriver {

    /**
     * The MSSQLServer error code raised when a login attempt uses an invalid password.
     *
     * See
     * <a
     * href="https://docs.microsoft.com/en-us/sql/relational-databases/errors-events/database-engine-events-and-errors">
     * MSSQL Server error codes</a>.
     */
    public static final int LOGIN_FAILED = 18456;

    /**
     * Set to sqlserver.
     */
    public static final String SUBPREFIX = "sqlserver";

    static {
        // Register this driver with the DriverManager on first class load.
        AWSSecretsManagerDriver.register(new AWSSecretsManagerMSSQLServerDriver());
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds a secret cache with default options.
     */
    public AWSSecretsManagerMSSQLServerDriver() {
        super();
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, using the supplied secret cache.
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    public AWSSecretsManagerMSSQLServerDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache from the given client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerMSSQLServerDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache around the given client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerMSSQLServerDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache from the given configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerMSSQLServerDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        // Only SQL Server login failures (error 18456) count as authentication errors.
        if (!(e instanceof SQLException)) {
            return false;
        }
        return ((SQLException) e).getErrorCode() == LOGIN_FAILED;
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        StringBuilder url = new StringBuilder("jdbc:sqlserver://").append(endpoint);
        if (StringUtils.isNotBlank(port)) {
            url.append(':').append(port);
        }
        if (StringUtils.isNotBlank(dbname)) {
            url.append(";databaseName=").append(dbname).append(';');
        }
        return url.toString();
    }

    @Override
    public String getDefaultDriverClass() {
        return "com.microsoft.sqlserver.jdbc.SQLServerDriver";
    }
}
| 6,157 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerOracleDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import java.sql.SQLException;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
 * <p>
 * AWS Secrets Manager aware JDBC driver for Oracle databases. Login credentials are
 * resolved from Secrets Manager rather than being supplied directly.
 * </p>
 *
 * <p>
 * Configuration properties use the "oracle" subprefix (e.g drivers.oracle.realDriverClass).
 * </p>
 *
 * <p>
 * For error codes see:
 * </p>
 *
 * <ul>
 * <li><a href="https://docs.oracle.com/en/database/oracle/oracle-database/12.2/jjdbc/JDBC-error-messages.html">
 * Oracle JDBC error codes</a>.</li>
 * <li><a href="https://docs.oracle.com/cd/B10501_01/server.920/a96525/e900.htm">Oracle JDBC error codes</a>.</li>
 * </ul>
 */
public final class AWSSecretsManagerOracleDriver extends AWSSecretsManagerDriver {

    /**
     * ORA-17079. May not be necessary, but erring on the side of caution.
     */
    public static final int USER_CREDENTIALS_DO_NOT_MATCH = 17079;

    /**
     * ORA-01017. This will occur if an incorrect password is used.
     */
    public static final int INVALID_USERNAME_OR_PASSWORD = 1017;

    /**
     * ORA-09911. May not be necessary, but erring on the side of caution.
     */
    public static final int INCORRECT_USER_PASSWORD = 9911;

    /**
     * Set to oracle.
     */
    public static final String SUBPREFIX = "oracle";

    static {
        // Register this driver with the DriverManager on first class load.
        AWSSecretsManagerDriver.register(new AWSSecretsManagerOracleDriver());
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds a secret cache with default options.
     */
    public AWSSecretsManagerOracleDriver() {
        super();
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, using the supplied secret cache.
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    public AWSSecretsManagerOracleDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache from the given client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerOracleDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache around the given client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerOracleDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache from the given configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerOracleDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        if (!(e instanceof SQLException)) {
            return false;
        }
        // Any of the three ORA credential-related codes means an authentication failure.
        switch (((SQLException) e).getErrorCode()) {
            case USER_CREDENTIALS_DO_NOT_MATCH:
            case INVALID_USERNAME_OR_PASSWORD:
            case INCORRECT_USER_PASSWORD:
                return true;
            default:
                return false;
        }
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        StringBuilder url = new StringBuilder("jdbc:oracle:thin:@//").append(endpoint);
        if (StringUtils.isNotBlank(port)) {
            url.append(':').append(port);
        }
        if (StringUtils.isNotBlank(dbname)) {
            url.append('/').append(dbname);
        }
        return url.toString();
    }

    @Override
    public String getDefaultDriverClass() {
        return "oracle.jdbc.OracleDriver";
    }
}
| 6,158 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerMariaDBDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import com.amazonaws.secretsmanager.util.SQLExceptionUtils;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
 * <p>
 * AWS Secrets Manager aware JDBC driver for MariaDB databases. Login credentials are
 * resolved from Secrets Manager rather than being supplied directly.
 * </p>
 *
 * <p>
 * MariaDB shares its authentication error code with MySQL, so the same detection logic
 * applies to both.
 * </p>
 *
 * <p>
 * Configuration properties use the "mariadb" subprefix (e.g drivers.mariadb.realDriverClass).
 * </p>
 */
public final class AWSSecretsManagerMariaDBDriver extends AWSSecretsManagerDriver {

    /**
     * MariaDB shares error codes with MySQL, as well as adding a number of new error codes specific to MariaDB.
     *
     * See <a href="https://mariadb.com/kb/en/library/mariadb-error-codes/">MariaDB error codes</a>.
     */
    public static final int ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE = 1045;

    /**
     * Set to mariadb.
     */
    public static final String SUBPREFIX = "mariadb";

    static {
        // Register this driver with the DriverManager on first class load.
        AWSSecretsManagerDriver.register(new AWSSecretsManagerMariaDBDriver());
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds a secret cache with default options.
     */
    public AWSSecretsManagerMariaDBDriver() {
        super();
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, using the supplied secret cache.
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    public AWSSecretsManagerMariaDBDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache from the given client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerMariaDBDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache around the given client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerMariaDBDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Creates the driver, reading configuration from the properties file with system
     * properties as defaults, and builds the secret cache from the given configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerMariaDBDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        // The helper unwraps nested exceptions and matches the MariaDB/MySQL
        // access-denied code.
        return SQLExceptionUtils.unwrapAndCheckForCode(e, ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE);
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        StringBuilder url = new StringBuilder("jdbc:mariadb://").append(endpoint);
        if (StringUtils.isNotBlank(port)) {
            url.append(':').append(port);
        }
        if (StringUtils.isNotBlank(dbname)) {
            url.append('/').append(dbname);
        }
        return url.toString();
    }

    @Override
    public String getDefaultDriverClass() {
        return "org.mariadb.jdbc.Driver";
    }
}
| 6,159 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/AWSSecretsManagerRedshiftDriver.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.secretsmanager.sql;
import java.sql.SQLException;
import com.amazonaws.secretsmanager.caching.SecretCache;
import com.amazonaws.secretsmanager.caching.SecretCacheConfiguration;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClientBuilder;
import software.amazon.awssdk.utils.StringUtils;
/**
 * <p>
 * Provides support for accessing Redshift databases using credentials stored
 * within AWS Secrets Manager.
 * </p>
 *
 * <p>
 * Configuration properties are specified using the "redshift" subprefix (e.g
 * drivers.redshift.realDriverClass).
 * </p>
 */
public final class AWSSecretsManagerRedshiftDriver extends AWSSecretsManagerDriver {

    /**
     * The Redshift error code for when a user logs in using an invalid password.
     *
     * See <a href=
     * "https://www.postgresql.org/docs/9.6/static/errcodes-appendix.html">Postgres documentation</a> (Redshift is built on Postgres).
     */
    public static final String ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE = "28P01";

    /**
     * Set to redshift.
     */
    public static final String SUBPREFIX = "redshift";

    static {
        // Register this driver with the DriverManager when the class is first loaded.
        AWSSecretsManagerDriver.register(new AWSSecretsManagerRedshiftDriver());
    }

    /**
     * Constructs the driver setting the properties from the properties file using
     * system properties as defaults.
     * Instantiates the secret cache with default options.
     */
    public AWSSecretsManagerRedshiftDriver() {
        super();
    }

    /**
     * Constructs the driver setting the properties from the properties file using
     * system properties as defaults.
     * Uses the passed in SecretCache.
     *
     * @param cache Secret cache to use to retrieve secrets
     */
    public AWSSecretsManagerRedshiftDriver(SecretCache cache) {
        super(cache);
    }

    /**
     * Constructs the driver setting the properties from the properties file using
     * system properties as defaults.
     * Instantiates the secret cache with the passed in client builder.
     *
     * @param builder Builder used to instantiate cache
     */
    public AWSSecretsManagerRedshiftDriver(SecretsManagerClientBuilder builder) {
        super(builder);
    }

    /**
     * Constructs the driver setting the properties from the properties file using
     * system properties as defaults.
     * Instantiates the secret cache with the provided AWS Secrets Manager client.
     *
     * @param client AWS Secrets Manager client to instantiate cache
     */
    public AWSSecretsManagerRedshiftDriver(SecretsManagerClient client) {
        super(client);
    }

    /**
     * Constructs the driver setting the properties from the properties file using
     * system properties as defaults.
     * Instantiates the secret cache with the provided cache configuration.
     *
     * @param cacheConfig Cache configuration to instantiate cache
     */
    public AWSSecretsManagerRedshiftDriver(SecretCacheConfiguration cacheConfig) {
        super(cacheConfig);
    }

    @Override
    public String getPropertySubprefix() {
        return SUBPREFIX;
    }

    @Override
    public boolean isExceptionDueToAuthenticationError(Exception e) {
        if (e instanceof SQLException) {
            SQLException sqle = (SQLException) e;
            String sqlState = sqle.getSQLState();
            // getSQLState() is documented as possibly returning null; compare from the
            // non-null constant to avoid a NullPointerException.
            return ACCESS_DENIED_FOR_USER_USING_PASSWORD_TO_DATABASE.equals(sqlState);
        }
        return false;
    }

    @Override
    public String constructUrlFromEndpointPortDatabase(String endpoint, String port, String dbname) {
        String url = "jdbc:redshift://" + endpoint;
        if (StringUtils.isNotBlank(port)) {
            url += ":" + port;
        }
        if (StringUtils.isNotBlank(dbname)) {
            url += "/" + dbname;
        }
        return url;
    }

    @Override
    public String getDefaultDriverClass() {
        return "com.amazon.redshift.Driver";
    }
}
| 6,160 |
0 | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager | Create_ds/aws-secretsmanager-jdbc/src/main/java/com/amazonaws/secretsmanager/sql/package-info.java | /*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
/**
* This package contains JDBC drivers that use secret IDs rather than hard-coded database credentials. They accomplish
* this by substituting a secret ID specified by the user field in their <code>connect</code> properties with the
* associated credentials from Secrets Manager. The call to <code>connect</code> is then propagated to a real JDBC
* driver that actually establishes the connection. See the <code>AWSSecretsManagerDriver</code> class and the
* individual drivers for configuration and usage details.
*/
package com.amazonaws.secretsmanager.sql;
| 6,161 |
0 | Create_ds/derand/derand/src/test/java/netflixoss | Create_ds/derand/derand/src/test/java/netflixoss/derand/DerandTest.java | /**
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package netflixoss.derand;
import ai.djl.translate.TranslateException;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * Unit tests for the static Derand entry points. Each test exercises the
 * happy path plus the empty-string and null edge cases.
 */
public class DerandTest {
    // tokenize: replaces random-looking words with the <rnd> token, keeps others.
    @Test public void testTokenize() throws TranslateException, IOException {
        assertEquals("hello <rnd> world", Derand.tokenize("hello 3y29842ysjhfs world"));
        assertEquals("", Derand.tokenize(""));
        // null input is treated the same as an empty string.
        assertEquals("", Derand.tokenize(null));
        assertEquals("<rnd>", Derand.tokenize("3y29842ysjhfs"));
    }
    // clean: drops random-looking words entirely instead of tokenizing them.
    @Test public void testClean() throws TranslateException, IOException {
        assertEquals("hello world", Derand.clean("hello 3y29842ysjhfs world"));
        assertEquals("", Derand.clean(""));
        assertEquals("", Derand.clean(null));
        assertEquals("", Derand.clean("3y29842ysjhfs"));
        assertEquals("hello hello", Derand.clean("y837sc42zsd hello sdyd8f7h34 hello 3y29842ysjhfs"));
    }
    // classify: returns one boolean per word, true where the word looks random.
    @Test public void testClassify() throws TranslateException, IOException {
        assertArrayEquals(new boolean[]{false, true, false}, Derand.classify("hello 3y29842ysjhfs world"));
        assertArrayEquals(new boolean[]{}, Derand.classify(""));
        assertArrayEquals(new boolean[]{}, Derand.classify(null));
        assertArrayEquals(new boolean[]{true}, Derand.classify("3y29842ysjhfs"));
        assertArrayEquals(new boolean[]{true, false, true, false, true}, Derand.classify("y837sc42zsd hello sdyd8f7h34 hello 3y29842ysjhfs"));
    }
}
| 6,162 |
0 | Create_ds/derand/derand/src/main/java/netflixoss | Create_ds/derand/derand/src/main/java/netflixoss/derand/StringToEncoderTranslator.java | /**
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package netflixoss.derand;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.translate.Batchifier;
import ai.djl.translate.Translator;
import ai.djl.translate.TranslatorContext;
import java.util.Arrays;
import java.util.List;
public final class StringToEncoderTranslator implements Translator<String, Boolean> {
StringToEncoderTranslator() {}
private static final List<Character> modelChars = Arrays.asList('!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.',
'/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<',
'=', '>', '?', '@', '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z');
int maxLen = 16;
@Override
public NDList processInput(TranslatorContext ctx, String input) {
/**
* This method allows to convert word "hello" to a {@modelChars.lenght()} dimensional array with values representing character index
* in the modelChars array and if the word is less than 50 characters, it will be left padded with 0f.
*
* For example, if {@modelChars.lenght()} == 50, "hello" will be converted to
* [
* 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
* 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
* 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 37, 44,
* 44, 47
* ]
*
* which is a numerical input that neural network understand.
*/
NDManager manager = ctx.getNDManager();
if (input == null){
return new NDList(manager.create(new float[maxLen]).expandDims(0));
}
int charArraySize = input.length();
// special case
if (charArraySize == 0 ){
// return 0 filled array
return new NDList(manager.create(new float[maxLen]).expandDims(0));
}
float[] finalResult = new float[maxLen];
if(charArraySize > maxLen){
/*
Removing extra characters from the left
*/
int startIdx = charArraySize - maxLen;
int currentIdx = 0;
int beginIndex = 0;
for (Character charVal: input.toCharArray()){
if(currentIdx < startIdx){
currentIdx++;
continue;
}
int charIdx = modelChars.indexOf(charVal);
if(charIdx != -1){
finalResult[beginIndex] = (float) charIdx;
} else {
finalResult[beginIndex] = 0f;
}
beginIndex++;
currentIdx++;
}
} else if (charArraySize < maxLen) { // pad with 0s on the left
int startIdx = maxLen - charArraySize;
int currentIdx = 0;
for (Character charVal: input.toCharArray()){
if(currentIdx < startIdx){
finalResult[currentIdx] = 0f;
} else {
int charIdx = modelChars.indexOf(charVal);
if(charIdx != -1){
finalResult[currentIdx] = (float) charIdx;
} else {
finalResult[currentIdx] = 0f;
}
}
currentIdx++;
}
} else { // same size of maxLen and input
int currentIdx = 0;
for (Character charVal: input.toCharArray()){
int charIdx = modelChars.indexOf(charVal);
if(charIdx != -1){
finalResult[currentIdx] = (float) charIdx;
} else {
finalResult[currentIdx] = 0f;
}
currentIdx++;
}
}
return new NDList(manager.create(finalResult).expandDims(0));
}
@Override
public Boolean processOutput(TranslatorContext ctx, NDList list) {
NDArray ndArray = list.singletonOrThrow();
NDArray ndArray1 = ndArray.get(0).argMax();
return ndArray1.toUint8Array()[0] == 1;
}
@Override
public Batchifier getBatchifier() {
return null;
}
} | 6,163 |
0 | Create_ds/derand/derand/src/main/java/netflixoss | Create_ds/derand/derand/src/main/java/netflixoss/derand/Derand.java | /**
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package netflixoss.derand;
import ai.djl.MalformedModelException;
import ai.djl.inference.Predictor;
import ai.djl.repository.zoo.Criteria;
import ai.djl.repository.zoo.ModelNotFoundException;
import ai.djl.repository.zoo.ModelZoo;
import ai.djl.repository.zoo.ZooModel;
import ai.djl.translate.TranslateException;
import java.io.IOException;
import java.util.*;
/**
 * Detects and masks random-looking tokens (hashes, generated ids, etc.) in
 * whitespace-separated text, using a pre-trained ONNX model served through
 * DJL. The model artifact is downloaded from {@link #MODEL_URL} when this
 * class is first loaded.
 *
 * <p>Thread-safety: DJL {@code Predictor}s are not thread-safe, so one is
 * cached per thread in {@link #predictorHolder}. NOTE(review): predictors are
 * never explicitly closed, so long-lived threads retain theirs for the life
 * of the thread — confirm this is acceptable for the intended deployment.
 */
public class Derand {
    // Location of the zipped ONNX model artifact.
    private static final String MODEL_URL = "https://randomly-public-us-east-1.s3.amazonaws.com/derand.onnx.zip";
    // Name of the model file inside the downloaded archive.
    private static final String MODEL_NAME = "derand.onnx";
    // Placeholder substituted for words the model classifies as random.
    private static final String RND = "<rnd>";
    // One predictor per thread (predictors are not thread-safe).
    private static final ThreadLocal<Predictor<String, Boolean>> predictorHolder = new ThreadLocal<>();
    /**
     * Loads the model if it is not loaded yet (or a previous load failed),
     * otherwise returns the cached handle.
     *
     * <p>NOTE: this is called from the static initializer below, at which
     * point the {@code model} field (declared further down) is still null —
     * the null check here relies on that initialization order.
     */
    synchronized static Optional<ZooModel<String, Boolean>> init() throws IOException, ModelNotFoundException, MalformedModelException {
        if(model == null || !model.isPresent()){
            return Optional.ofNullable(getModel());
        } else {
            return model;
        }
    }
    /** Builds the DJL criteria and loads the ONNX model from the remote URL. */
    private static ZooModel<String, Boolean> getModel() throws MalformedModelException, ModelNotFoundException, IOException {
        Criteria<String, Boolean> criteria = Criteria.builder()
                .setTypes(String.class, Boolean.class)
                .optTranslator(new StringToEncoderTranslator())
                .optModelUrls(MODEL_URL)
                .optModelName(MODEL_NAME)
                .optEngine("OnnxRuntime")
                .build();
        return ModelZoo.loadModel(criteria);
    }
    // TODO: add retry
    // Cached model handle; Optional.empty() signals that loading failed.
    // Initialized by the static block below, not at declaration.
    private static Optional<ZooModel<String, Boolean>> model;
    static {
        try {
            model = init();
        } catch (IOException | ModelNotFoundException | MalformedModelException e) {
            // Load failures are swallowed here; callers later get an
            // IOException from predictRandomnessPerWord when the model is absent.
            model = Optional.empty();
        }
    }
    /**
     * Classifies each word independently; {@code true} means "random".
     *
     * @throws IOException if the model failed to load at class initialization
     */
    private static boolean[] predictRandomnessPerWord(String[] words) throws TranslateException, IOException {
        if(!model.isPresent()){
            throw new IOException("model can not be null");
        }
        // Lazily create and cache this thread's predictor.
        Predictor<String, Boolean> predictor = predictorHolder.get();
        if(predictor == null){
            predictor = model.get().newPredictor();
            predictorHolder.set(predictor);
        }
        boolean[] result = new boolean[words.length];
        int idx = 0;
        for (String word: words){
            boolean isRandom = predictor.predict(word);
            result[idx] = isRandom;
            idx++;
        }
        return result;
    }
    /**
     * Maps each word to itself, or to the {@code <rnd>} placeholder when the
     * model classifies it as random.
     */
    private static String[] tokenizeWords(String[] words) throws TranslateException, IOException {
        String[] result = new String[words.length];
        boolean[] randomMask = predictRandomnessPerWord(words);
        int idx = 0;
        for(boolean isRandom: randomMask){
            if(isRandom){
                result[idx] = RND;
            } else{
                result[idx] = words[idx];
            }
            idx++;
        }
        return result;
    }
    /**
     * Replaces every random-looking word in {@code text} with the
     * {@code <rnd>} placeholder, preserving word order.
     */
    public static String tokenize(String text) throws TranslateException, IOException {
        if (isEmpty(text)) return "";
        StringJoiner joiner = new StringJoiner(" ");
        for(String tokenizedWord :tokenizeWords(text.split(" "))){
            joiner.add(tokenizedWord);
        }
        return joiner.toString();
    }
    /**
     * Removes every random-looking word from {@code text}, keeping only words
     * classified as non-random.
     */
    public static String clean(String text) throws TranslateException, IOException {
        if (isEmpty(text)) return "";
        StringJoiner joiner = new StringJoiner(" ");
        for(String tokenizedWord :tokenizeWords(text.split(" "))){
            if(!tokenizedWord.equals(RND)){
                joiner.add(tokenizedWord);
            }
        }
        return joiner.toString();
    }
    public static boolean[] classify(String text) throws TranslateException, IOException {
        /**
         * return boolean array that represents each word in a string, split by whitespace, as either random (true) or non-random (false) element
         */
        if (isEmpty(text)) return new boolean[]{};
        return predictRandomnessPerWord(text.split(" "));
    }
    // True for null, empty, or whitespace-only input.
    private static boolean isEmpty(String text) {
        return text == null || text.isEmpty() || text.trim().isEmpty();
    }
}
| 6,164 |
0 | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty/http/RandomWeighted.java | package netflix.ocelli.examples.rxnetty.http;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.client.HttpClient;
import netflix.ocelli.Instance;
import netflix.ocelli.examples.rxnetty.http.HttpExampleUtils.*;
import netflix.ocelli.rxnetty.protocol.http.HttpLoadBalancer;
import netflix.ocelli.rxnetty.protocol.http.WeightedHttpClientListener;
import rx.Observable;
import java.net.ConnectException;
import java.net.SocketAddress;
import java.net.SocketException;
import java.nio.charset.Charset;
import java.util.concurrent.TimeUnit;
import static netflix.ocelli.examples.rxnetty.http.HttpExampleUtils.*;
/**
 * Example of a weighted-random HTTP load balancer using ocelli + RxNetty.
 *
 * <p>Each host reports a weight derived from its last observed response
 * latency (lower latency =&gt; higher weight); the balancer then picks hosts
 * randomly with probability proportional to weight. Hosts answering 503 are
 * quarantined; hosts whose connections fail are removed.
 */
public final class RandomWeighted {

    private RandomWeighted() {
    }

    public static void main(String[] args) {
        Observable<Instance<SocketAddress>> hosts = newHostStreamWithCannedLatencies(1L, 2L);

        HttpLoadBalancer<ByteBuf, ByteBuf> lb =
                HttpLoadBalancer.<ByteBuf, ByteBuf>weigthedRandom(hosts, failureListener -> {
                    return new WeightedHttpClientListener() {

                        private volatile int weight;

                        @Override
                        public int getWeight() {
                            return weight;
                        }

                        @Override
                        public void onResponseHeadersReceived(int responseCode, long duration, TimeUnit timeUnit) {
                            /* This is just a demo for how to wire the weight of an instance to the load balancer, it
                             * certainly is not the algorithm to be used in real production applications.
                             */
                            // High latency => low weight. BUGFIX: the previous
                            // (int) (Long.MAX_VALUE - duration) truncated to the low
                            // 32 bits, yielding a meaningless (typically negative)
                            // weight. Clamp into int range instead, mirroring the
                            // ChoiceOfTwo example.
                            weight = (int) Math.max(0L, Integer.MAX_VALUE - duration);
                            if (responseCode == 503) {
                                // When throttled, quarantine.
                                failureListener.quarantine(1, TimeUnit.MINUTES);
                            }
                        }

                        @Override
                        public void onConnectFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
                            // Connect failed, remove
                            failureListener.remove();
                        }
                    };
                });

        // Issue 10 GETs through the balancer, retrying on connect/socket
        // failures and on non-200 responses.
        HttpClient.newClient(lb.toConnectionProvider())
                  .createGet("/hello")
                  .doOnNext(System.out::println)
                  .flatMap(resp -> {
                      if (resp.getStatus().code() != 200) {
                          return Observable.error(new InvalidResponseException());
                      }
                      return resp.getContent();
                  })
                  .retry((integer, throwable) -> throwable instanceof SocketException
                          || throwable instanceof ConnectException
                          || throwable instanceof InvalidResponseException)
                  .repeat(10)
                  .toBlocking()
                  .forEach(bb -> bb.toString(Charset.defaultCharset()));
    }
}
| 6,165 |
0 | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty/http/ChoiceOfTwo.java | package netflix.ocelli.examples.rxnetty.http;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.client.HttpClient;
import netflix.ocelli.Instance;
import netflix.ocelli.rxnetty.protocol.http.HttpLoadBalancer;
import netflix.ocelli.rxnetty.protocol.http.WeightedHttpClientListener;
import rx.Observable;
import java.net.ConnectException;
import java.net.SocketAddress;
import java.net.SocketException;
import java.nio.charset.Charset;
import java.util.concurrent.TimeUnit;
import static netflix.ocelli.examples.rxnetty.http.HttpExampleUtils.*;
public final class ChoiceOfTwo {
private ChoiceOfTwo() {
}
public static void main(String[] args) {
Observable<Instance<SocketAddress>> hosts = newHostStreamWithCannedLatencies(5L, 1L, 2L, 1L, 0L);
HttpLoadBalancer<ByteBuf, ByteBuf> lb =
HttpLoadBalancer.<ByteBuf, ByteBuf>choiceOfTwo(hosts, failureListener -> {
return new WeightedHttpClientListener() {
private volatile int lastSeenLatencyInverse;
@Override
public int getWeight() {
return lastSeenLatencyInverse;
}
@Override
public void onResponseHeadersReceived(int responseCode, long duration, TimeUnit timeUnit) {
/* This is just a demo for how to wire the weight of an instance to the load balancer, it
* certainly is not the algorithm to be used in real production applications.
*/
lastSeenLatencyInverse = Integer.MAX_VALUE - (int)duration; // High latency => low weight
if (responseCode == 503) {
// When throttled, quarantine.
failureListener.quarantine(1, TimeUnit.MINUTES);
}
}
@Override
public void onConnectFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
// Connect failed, remove
failureListener.remove();
}
};
});
HttpClient.newClient(lb.toConnectionProvider())
.createGet("/hello")
.doOnNext(System.out::println)
.flatMap(resp -> {
if (resp.getStatus().code() != 200) {
return Observable.error(new InvalidResponseException());
}
return resp.getContent();
})
.retry((integer, throwable) -> throwable instanceof SocketException
|| throwable instanceof ConnectException
|| throwable instanceof InvalidResponseException)
.repeat(10)
.toBlocking()
.forEach(bb -> bb.toString(Charset.defaultCharset()));
}
}
| 6,166 |
0 | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty/http/HttpExampleUtils.java | package netflix.ocelli.examples.rxnetty.http;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.reactivex.netty.protocol.http.server.HttpServer;
import netflix.ocelli.Instance;
import rx.Observable;
import rx.functions.Func1;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.concurrent.TimeUnit;
/**
 * Shared plumbing for the HTTP load-balancer examples: starts throwaway
 * RxNetty servers with canned behavior (fixed latency or a fixed response
 * status) and publishes their addresses as {@link Instance} streams.
 */
final class HttpExampleUtils {

    private HttpExampleUtils() {
    }

    /**
     * Starts one server per latency value (millis) and emits each server's
     * address wrapped in a never-terminating {@link Instance}.
     */
    protected static Observable<Instance<SocketAddress>> newHostStreamWithCannedLatencies(Long... latencies) {
        return Observable.from(latencies)
                         .map(HttpExampleUtils::startServer)
                         .map(HttpExampleUtils::asInstance);
    }

    /**
     * Starts one server per canned status; a {@code null} status stands in
     * for an unreachable host and is mapped to an unconnectable address.
     */
    protected static Observable<Instance<SocketAddress>> newHostStreamWithCannedStatus(
            HttpResponseStatus... cannedStatuses) {
        return Observable.from(cannedStatuses)
                         .map(status -> null != status ? startServer(status) : new InetSocketAddress(0))
                         .map(HttpExampleUtils::asInstance);
    }

    /** Boots a server that answers 200 OK after the given artificial delay. */
    protected static SocketAddress startServer(long latencyMillis) {
        return HttpServer.newServer()
                         .start((request, response) ->
                                 Observable.timer(latencyMillis, TimeUnit.MILLISECONDS)
                                           .flatMap(aTick -> response.addHeader("X-Instance",
                                                                                response.unsafeNettyChannel()
                                                                                        .localAddress())
                                                                     .setStatus(HttpResponseStatus.OK)))
                         .getServerAddress();
    }

    /** Boots a server that always answers with the supplied status. */
    protected static SocketAddress startServer(HttpResponseStatus cannedStatus) {
        return HttpServer.newServer()
                         .start((request, response) ->
                                 response.addHeader("X-Instance", response.unsafeNettyChannel().localAddress())
                                         .setStatus(cannedStatus))
                         .getServerAddress();
    }

    /** Marker exception the examples use to force a retry on non-200 responses. */
    protected static class InvalidResponseException extends RuntimeException {

        private static final long serialVersionUID = -712946630951320233L;

        public InvalidResponseException() {
        }
    }

    /** Wraps an address in an {@link Instance} whose lifecycle never completes. */
    private static Instance<SocketAddress> asInstance(final SocketAddress socketAddr) {
        return new Instance<SocketAddress>() {
            @Override
            public Observable<Void> getLifecycle() {
                return Observable.never();
            }

            @Override
            public SocketAddress getValue() {
                return socketAddr;
            }
        };
    }
}
| 6,167 |
0 | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty | Create_ds/ocelli/ocelli-examples/src/main/java/netflix/ocelli/examples/rxnetty/http/RoundRobin.java | package netflix.ocelli.examples.rxnetty.http;
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.client.HttpClient;
import io.reactivex.netty.protocol.http.client.events.HttpClientEventsListener;
import netflix.ocelli.Instance;
import netflix.ocelli.examples.rxnetty.http.HttpExampleUtils.*;
import netflix.ocelli.rxnetty.protocol.http.HttpLoadBalancer;
import rx.Observable;
import java.net.ConnectException;
import java.net.SocketAddress;
import java.net.SocketException;
import java.nio.charset.Charset;
import java.util.concurrent.TimeUnit;
import static io.netty.handler.codec.http.HttpResponseStatus.*;
import static netflix.ocelli.examples.rxnetty.http.HttpExampleUtils.*;
public final class RoundRobin {
private RoundRobin() {
}
public static void main(String[] args) {
Observable<Instance<SocketAddress>> hosts = newHostStreamWithCannedStatus(OK, SERVICE_UNAVAILABLE,
null/*Unavailable socket address*/);
HttpLoadBalancer<ByteBuf, ByteBuf> lb =
HttpLoadBalancer.<ByteBuf, ByteBuf>roundRobin(hosts, failureListener -> {
return new HttpClientEventsListener() {
@Override
public void onResponseHeadersReceived(int responseCode, long duration, TimeUnit timeUnit) {
if (responseCode == 503) {
// When throttled, quarantine.
failureListener.quarantine(1, TimeUnit.MINUTES);
}
}
@Override
public void onConnectFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
// Connect failed, remove
failureListener.remove();
}
};
});
HttpClient.newClient(lb.toConnectionProvider())
.createGet("/hello")
.doOnNext(System.out::println)
.flatMap(resp -> {
if (resp.getStatus().code() != 200) {
return Observable.error(new InvalidResponseException());
}
return resp.getContent();
})
.retry((integer, throwable) -> throwable instanceof SocketException
|| throwable instanceof ConnectException
|| throwable instanceof InvalidResponseException)
.repeat(10)
.toBlocking()
.forEach(bb -> bb.toString(Charset.defaultCharset()));
}
}
| 6,168 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/InstanceQuarantinerTest.java | package netflix.ocelli;
import java.util.Comparator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import netflix.ocelli.InstanceQuarantiner.IncarnationFactory;
import netflix.ocelli.functions.Delays;
import netflix.ocelli.loadbalancer.ChoiceOfTwoLoadBalancer;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import rx.Observable;
import rx.functions.Func1;
import rx.schedulers.TestScheduler;
/**
 * Tests for the InstanceQuarantiner: verifies that a failed client instance
 * is removed from the load balancer, re-incarnated after the quarantine
 * delay, and permanently removed when the underlying host is removed.
 *
 * <p>All timing is driven by a shared {@link TestScheduler}, so quarantine
 * expiry is deterministic within each test.
 */
public class InstanceQuarantinerTest {
    // Minimal client callback surface exercised by the quarantiner tests.
    public static interface EventListener {
        public void onBegin();
        public void onSuccess();
        public void onFailed();
    }
    // Shared virtual-time scheduler; quarantine delays elapse only when a
    // test explicitly advances it.
    final static TestScheduler scheduler = new TestScheduler();
    /**
     * Fake client whose incarnation id increments every time the quarantiner
     * re-creates it after a failure. Reports successes/failures to the
     * quarantiner through its {@link InstanceEventListener}.
     */
    public static class Client implements EventListener {
        // Adapts an Instance<Integer> (a host address) into an Instance<Client>.
        public static Func1<Instance<Integer>, Instance<Client>> connector() {
            return new Func1<Instance<Integer>, Instance<Client>>() {
                @Override
                public Instance<Client> call(Instance<Integer> t1) {
                    return Instance.create(new Client(t1.getValue(), t1.getLifecycle()), t1.getLifecycle());
                }
            };
        }
        // Factory the quarantiner uses to spawn a fresh incarnation of a
        // failed client; the shared counter tracks incarnation numbers.
        public static IncarnationFactory<Client> incarnationFactory() {
            return new IncarnationFactory<Client>() {
                @Override
                public Client create(
                        Client value,
                        InstanceEventListener listener,
                        Observable<Void> lifecycle) {
                    return new Client(value, listener, lifecycle);
                }
            };
        }
        private Integer address;
        private final Observable<Void> lifecycle;
        // Shared across incarnations of the same client; counts incarnations.
        private final AtomicInteger counter;
        // Metric used by compareByMetric(); never updated in these tests.
        private AtomicInteger score = new AtomicInteger();
        private final InstanceEventListener listener;
        // Incarnation id: 0 for the root client, then 1, 2, ... per incarnation.
        private final int id;
        // Root constructor: no event listener, id fixed at 0.
        public Client(Integer address, Observable<Void> lifecycle) {
            this.address = address;
            this.counter = new AtomicInteger();
            this.lifecycle = lifecycle;
            this.listener = null;
            id = 0;
        }
        // Incarnation constructor: inherits address and the shared counter
        // from the previous incarnation and bumps the incarnation id.
        Client(Client client, InstanceEventListener listener, Observable<Void> lifecycle) {
            this.address = client.address;
            this.counter = client.counter;
            this.lifecycle = lifecycle;
            this.listener = listener;
            id = this.counter.incrementAndGet();
        }
        @Override
        public void onBegin() {
        }
        @Override
        public void onSuccess() {
            listener.onEvent(InstanceEvent.EXECUTION_SUCCESS, 0, TimeUnit.SECONDS, null, null);
        }
        @Override
        public void onFailed() {
            listener.onEvent(InstanceEvent.EXECUTION_FAILED, 0, TimeUnit.SECONDS, new Exception("Failed"), null);
        }
        public Integer getValue() {
            return address;
        }
        public int getId() {
            return id;
        }
        public String toString() {
            return address.toString() + "[" + id + "]";
        }
        // Orders clients by their (test-static) score metric.
        public static Comparator<Client> compareByMetric() {
            return new Comparator<Client>() {
                @Override
                public int compare(Client o1, Client o2) {
                    return o1.score.get() - o2.score.get();
                }
            };
        }
        public Observable<Void> getLifecycle() {
            return lifecycle;
        }
    }
    /**
     * Fail -> empty -> quarantine expiry -> new incarnation, twice over;
     * then removing the host empties the balancer for good.
     */
    @Test
    public void basicTest() {
        final InstanceManager<Integer> instances = InstanceManager.create();
        final LoadBalancer<Client> lb = LoadBalancer
                .fromSource(instances.map(Client.connector()))
                .withQuarantiner(Client.incarnationFactory(), Delays.fixed(1, TimeUnit.SECONDS), scheduler)
                .build(RoundRobinLoadBalancer.<Client>create());
        instances.add(1);
        // Load balancer now has one instance
        Client c = lb.next();
        Assert.assertNotNull("Load balancer should have an active intance", c);
        Assert.assertEquals(1, c.getId());
        // Force the instance to fail
        c.onFailed();
        // Load balancer is now empty
        try {
            c = lb.next();
            Assert.fail("Load balancer should be empty");
        }
        catch (NoSuchElementException e) {
        }
        // Advance past quarantine time
        scheduler.advanceTimeBy(2, TimeUnit.SECONDS);
        c = lb.next();
        Assert.assertNotNull("Load balancer should have an active intance", c);
        Assert.assertEquals(2, c.getId());
        // Force the instance to fail
        c.onFailed();
        // Load balancer is now empty
        try {
            c = lb.next();
            Assert.fail("Load balancer should be empty");
        }
        catch (NoSuchElementException e) {
        }
        // Advance past quarantine time
        scheduler.advanceTimeBy(2, TimeUnit.SECONDS);
        c = lb.next();
        Assert.assertNotNull("Load balancer should have an active intance", c);
        // Third incarnation: checked via the shared counter this time.
        Assert.assertEquals(3, c.counter.get());
        // Remove the instance entirely
        instances.remove(1);
        try {
            c = lb.next();
            Assert.fail();
        }
        catch (NoSuchElementException e) {
        }
        System.out.println(c);
    }
    // Ignored scenario driving 10 operations through the balancer with a
    // retry on failure; kept for reference.
    @Test
    @Ignore
    public void test() {
        final InstanceManager<Integer> instances = InstanceManager.create();
        final LoadBalancer<Client> lb = LoadBalancer
                .fromSource(instances.map(Client.connector()))
                .withQuarantiner(Client.incarnationFactory(), Delays.fixed(1, TimeUnit.SECONDS), scheduler)
                .build(RoundRobinLoadBalancer.<Client>create());
        // Add to the load balancer
        instances.add(1);
        instances.add(2);
        // Perform 10 operations
        List<String> result = Observable
                .interval(100, TimeUnit.MILLISECONDS)
                .concatMap(new Func1<Long, Observable<String>>() {
                    @Override
                    public Observable<String> call(final Long counter) {
                        return Observable.just(lb.next())
                                .concatMap(new Func1<Client, Observable<String>>() {
                                    @Override
                                    public Observable<String> call(Client instance) {
                                        instance.onBegin();
                                        // Host "1" always fails; rely on retry(1)
                                        // to fall through to host "2".
                                        if (1 == instance.getValue()) {
                                            instance.onFailed();
                                            return Observable.error(new Exception("Failed"));
                                        }
                                        instance.onSuccess();
                                        return Observable.just(instance + "-" + counter);
                                    }
                                })
                                .retry(1);
                    }
                })
                .take(10)
                .toList()
                .toBlocking()
                .first()
                ;
    }
    // Smoke test only: verifies the quarantiner composes with
    // ChoiceOfTwoLoadBalancer without throwing; makes no assertions.
    @Test
    public void integrationTest() {
        final InstanceManager<Integer> instances = InstanceManager.create();
        final LoadBalancer<Client> lb = LoadBalancer
                .fromSource(instances.map(Client.connector()))
                .withQuarantiner(Client.incarnationFactory(), Delays.fixed(1, TimeUnit.SECONDS), scheduler)
                .build(ChoiceOfTwoLoadBalancer.<Client>create(Client.compareByMetric()));
        instances.add(1);
        Client client = lb.next();
        instances.add(2);
        client = lb.next();
    }
}
| 6,169 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/LoadBalancerTest.java | package netflix.ocelli;
import com.google.common.collect.Lists;
import netflix.ocelli.InstanceQuarantiner.IncarnationFactory;
import netflix.ocelli.functions.Delays;
import netflix.ocelli.loadbalancer.ChoiceOfTwoLoadBalancer;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import org.hamcrest.MatcherAssert;
import org.junit.Test;
import rx.Observable;
import rx.Subscription;
import rx.functions.Func0;
import rx.functions.Func1;
import rx.schedulers.TestScheduler;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.*;
/**
 * Tests for the {@code LoadBalancer} builder entry points: fixed host lists,
 * type conversion, a weighted strategy, and the quarantiner hook.
 *
 * <p>NOTE(review): several assertions expect "host2:8080" to be chosen first
 * from a two-host list with {@code RoundRobinLoadBalancer.create(0)} —
 * presumably the seed makes selection start at the second entry; confirm
 * against the RoundRobinLoadBalancer implementation.
 */
public class LoadBalancerTest {
    // Orders clients by their integer weight (ascending).
    private static final Comparator<ClientWithWeight> COMPARE_BY_WEIGHT = new Comparator<ClientWithWeight>() {
        @Override
        public int compare(ClientWithWeight o1, ClientWithWeight o2) {
            return o1.weight.compareTo(o2.weight);
        }
    };
    // Converts an address Instance into a ClientWithWeight Instance (weight 1).
    private static final Func1<Instance<String>, Instance<ClientWithWeight>> CLIENT_FROM_ADDRESS = new Func1<Instance<String>, Instance<ClientWithWeight>>() {
        @Override
        public Instance<ClientWithWeight> call(Instance<String> t1) {
            return Instance.create(new ClientWithWeight(t1.getValue(), 1), t1.getLifecycle());
        }
    };
    /** Round-robin over a fixed two-host list alternates between the hosts. */
    @Test
    public void createFromFixedList() {
        LoadBalancer<String> lb = LoadBalancer
                .fromFixedSource(Lists.newArrayList("host1:8080", "host2:8080"))
                .build(RoundRobinLoadBalancer.<String>create(0),
                       InstanceCollector.create(new Func0<Map<String, Subscription>>() {
                           @Override
                           public Map<String, Subscription> call() {
                               // LinkedHashMap keeps insertion order so the
                               // round-robin sequence is deterministic.
                               return new LinkedHashMap<String, Subscription>();
                           }
                       }))
                ;
        MatcherAssert.assertThat("Unexpected first host chosen.", lb.next(), is("host2:8080"));
        MatcherAssert.assertThat("Unexpected second host chosen.", lb.next(), is("host1:8080"));
    }
    /** Same as above but converting addresses to a client type first. */
    @Test
    public void createFromFixedListAndConvertToDifferentType() {
        LoadBalancer<ClientWithWeight> lb = LoadBalancer
                .fromFixedSource(Lists.newArrayList("host1:8080", "host2:8080"))
                .convertTo(CLIENT_FROM_ADDRESS)
                .build(RoundRobinLoadBalancer.<ClientWithWeight>create(0),
                       InstanceCollector.create(new Func0<Map<ClientWithWeight, Subscription>>() {
                           @Override
                           public Map<ClientWithWeight, Subscription> call() {
                               return new LinkedHashMap<ClientWithWeight, Subscription>();
                           }
                       }))
                ;
        MatcherAssert.assertThat("Unexpected first host chosen.", lb.next().address, equalTo("host2:8080"));
        MatcherAssert.assertThat("Unexpected second host chosen.", lb.next().address, equalTo("host1:8080"));
    }
    /**
     * Choice-of-two with a weight comparator keeps picking the heavier host
     * (host2 has weight 2 vs host1's weight 1).
     */
    @Test
    public void createFromFixedListWithAdvancedAlgorithm() {
        LoadBalancer<ClientWithWeight> lb = LoadBalancer
                .fromFixedSource(
                        Lists.newArrayList(new ClientWithWeight("host1:8080", 1), new ClientWithWeight("host2:8080", 2)))
                .build(ChoiceOfTwoLoadBalancer.create(COMPARE_BY_WEIGHT),
                       InstanceCollector.create(new Func0<Map<ClientWithWeight, Subscription>>() {
                           @Override
                           public Map<ClientWithWeight, Subscription> call() {
                               return new LinkedHashMap<ClientWithWeight, Subscription>();
                           }
                       }))
                ;
        MatcherAssert.assertThat("Unexpected first host chosen.", lb.next().address, equalTo("host2:8080"));
        MatcherAssert.assertThat("Unexpected second host chosen.", lb.next().address, equalTo("host2:8080"));
    }
    /**
     * After a client reports a failure it is quarantined (10s on a virtual
     * scheduler that is never advanced), so only the other host is served.
     */
    @Test
    public void createFromFixedAndUseQuaratiner() {
        TestScheduler scheduler = new TestScheduler();
        LoadBalancer<ClientWithLifecycle> lb = LoadBalancer
                .fromFixedSource(Lists.newArrayList(new ClientWithLifecycle("host1:8080"),
                                                    new ClientWithLifecycle("host2:8080")))
                .withQuarantiner(new IncarnationFactory<ClientWithLifecycle>() {
                    @Override
                    public ClientWithLifecycle create(ClientWithLifecycle client,
                                                      InstanceEventListener listener,
                                                      Observable<Void> lifecycle) {
                        return new ClientWithLifecycle(client, listener);
                    }
                }, Delays.fixed(10, TimeUnit.SECONDS), scheduler)
                .build(RoundRobinLoadBalancer.<ClientWithLifecycle>create(0),
                       InstanceCollector.create(new Func0<Map<ClientWithLifecycle, Subscription>>() {
                           @Override
                           public Map<ClientWithLifecycle, Subscription> call() {
                               return new LinkedHashMap<ClientWithLifecycle, Subscription>();
                           }
                       }));
        ClientWithLifecycle clientToFail = lb.next();
        MatcherAssert.assertThat("Unexpected host chosen before failure.", clientToFail.address, equalTo("host2:8080"));
        clientToFail.forceFail();
        MatcherAssert.assertThat("Unexpected first host chosen post failure.", lb.next().address, equalTo("host1:8080"));
        MatcherAssert.assertThat("Unexpected second host chosen post failure.", lb.next().address, equalTo("host1:8080"));
    }
    // Test client that can report a failure event to the quarantiner.
    static class ClientWithLifecycle {
        private final String address;
        private final InstanceEventListener listener;
        // Root instance: no listener wired yet (forceFail() would NPE here).
        public ClientWithLifecycle(String address) {
            this.address = address;
            listener = null;
        }
        // Incarnation created by the quarantiner, with a live listener.
        public ClientWithLifecycle(ClientWithLifecycle parent, InstanceEventListener listener) {
            address = parent.address;
            this.listener = listener;
        }
        public void forceFail() {
            listener.onEvent(InstanceEvent.EXECUTION_FAILED, 0, TimeUnit.MILLISECONDS, new Throwable("Failed"), null);
        }
    }
    // Simple value pair used by the weighted-strategy test.
    static class ClientWithWeight {
        private final String address;
        private final Integer weight;
        public ClientWithWeight(String address, int weight) {
            this.address = address;
            this.weight = weight;
        }
    }
}
| 6,170 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/retry/RetryFailedTestRule.java | package netflix.ocelli.retry;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;
/**
 * A JUnit {@link TestRule} that re-runs a failed test method before reporting
 * failure. Annotate a test with {@link Retry} to enable retries; the test
 * passes as soon as any attempt succeeds, and the last caught throwable is
 * rethrown when every attempt fails.
 *
 * <p>Note: {@code @Retry(n)} executes up to {@code n + 1} attempts (the
 * initial run plus {@code n} retries). Not thread-safe: one rule instance
 * tracks a single test's attempt counter.
 */
public class RetryFailedTestRule implements TestRule {
    // Index of the attempt currently (or last) executed; exposed via
    // getAttemptNumber() for tests that inspect how many tries were needed.
    private int attemptNumber;

    /** Number of retries to perform after the initial attempt fails. */
    @Target({ElementType.METHOD})
    @Retention(RetentionPolicy.RUNTIME)
    public @interface Retry {
        int value();
    }

    public RetryFailedTestRule() {
        this.attemptNumber = 0;
    }

    @Override
    public Statement apply(final Statement base, final Description description) {
        Retry retry = description.getAnnotation(Retry.class);
        // Default to one retry when the annotation is absent. Clamp negative
        // values to 0: previously a negative count skipped the loop entirely
        // and then executed "throw null", raising a NullPointerException.
        final int retryCount = retry == null ? 1 : Math.max(0, retry.value());
        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                Throwable caughtThrowable = null;
                // Initial attempt plus retryCount retries.
                for (attemptNumber = 0; attemptNumber <= retryCount; ++attemptNumber) {
                    try {
                        base.evaluate();
                        System.err.println(description.getDisplayName() + ": attempt number " + attemptNumber + " succeeded");
                        return;
                    } catch (Throwable t) {
                        // Remember the most recent failure; it is rethrown if
                        // no attempt ever succeeds.
                        caughtThrowable = t;
                        System.err.println(description.getDisplayName() + ": attempt number " + attemptNumber + " failed:");
                        System.err.println(t.toString());
                    }
                }
                // BUGFIX: the old message reported "retryCount failures" even
                // though retryCount + 1 attempts were made and all failed.
                System.err.println(description.getDisplayName() + ": giving up after " + (retryCount + 1) + " failures.");
                throw caughtThrowable;
            }
        };
    }

    /** @return the zero-based index of the last attempt executed. */
    public int getAttemptNumber() {
        return attemptNumber;
    }
}
| 6,171 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/retry/BackupRequestStrategyTest.java | package netflix.ocelli.retry;
import netflix.ocelli.LoadBalancer;
import netflix.ocelli.functions.Metrics;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import netflix.ocelli.retrys.BackupRequestRetryStrategy;
import netflix.ocelli.util.RxUtil;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import rx.Observable;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.schedulers.TestScheduler;
import rx.subjects.BehaviorSubject;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@Ignore
// Test needs to be updated to tracks clients not as Observables since they cannot be
// effectively compared when cached internally
/**
 * Tests for {@code BackupRequestRetryStrategy}: when a primary request has
 * not completed within the (memoized) 1000ms timeout, a backup request is
 * issued to the next host and the first successful response wins. All timing
 * runs on a virtual {@link TestScheduler}.
 *
 * <p>Currently {@code @Ignore}d — see the note above about client identity.
 */
public class BackupRequestStrategyTest {
    // Converts an Observable<Integer> "client" into its string response.
    private final Func1<Observable<Integer>, Observable<String>> Operation = new Func1<Observable<Integer>, Observable<String>>() {
        @Override
        public Observable<String> call(Observable<Integer> t1) {
            return t1.map(new Func1<Integer, String>() {
                @Override
                public String call(Integer t1) {
                    return t1.toString();
                }
            });
        }
    };
    private final TestScheduler scheduler = new TestScheduler();
    // Backup request fires after a fixed (memoized) 1000ms timeout.
    private final BackupRequestRetryStrategy<String> strategy = BackupRequestRetryStrategy.<String>builder()
            .withScheduler(scheduler)
            .withTimeoutMetric(Metrics.memoize(1000L))
            .build();
    /** Primary answers immediately: no backup is issued, one host consulted. */
    @Test
    public void firstSucceedsFast() {
        BehaviorSubject<List<Observable<Integer>>> subject = BehaviorSubject.create(Arrays.asList(
                Observable.just(1),
                Observable.just(2),
                Observable.just(3)));
        LoadBalancer<Observable<Integer>> lb = LoadBalancer.fromSnapshotSource(subject).build(RoundRobinLoadBalancer.<Observable<Integer>>create(-1));
        final AtomicInteger lbCounter = new AtomicInteger();
        final AtomicReference<String> result = new AtomicReference<String>();
        lb .toObservable()
           .doOnNext(RxUtil.increment(lbCounter))
           .flatMap(Operation)
           .compose(strategy)
           .doOnNext(RxUtil.set(result))
           .subscribe();
        scheduler.advanceTimeBy(2, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        Assert.assertEquals("1", result.get());
        Assert.assertEquals(1, lbCounter.get());
    }
    /** Primary never answers: the backup request to host 2 wins. */
    @Test
    public void firstNeverSecondSucceeds() {
        BehaviorSubject<List<Observable<Integer>>> subject = BehaviorSubject.create(Arrays.asList(
                Observable.<Integer>never(),
                Observable.just(2),
                Observable.just(3)));
        LoadBalancer<Observable<Integer>> lb = LoadBalancer.fromSnapshotSource(subject).build(RoundRobinLoadBalancer.<Observable<Integer>>create(-1));
        final AtomicInteger lbCounter = new AtomicInteger();
        final AtomicReference<String> result = new AtomicReference<String>();
        lb .toObservable()
           .doOnNext(RxUtil.increment(lbCounter))
           .flatMap(Operation)
           .compose(strategy)
           .doOnNext(RxUtil.set(result))
           .subscribe();
        scheduler.advanceTimeBy(2, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        Assert.assertEquals("2", result.get());
        Assert.assertEquals(2, lbCounter.get());
    }
    /** Primary fails immediately: the backup request to host 2 wins. */
    @Test
    public void firstFailsSecondSucceeds() {
        BehaviorSubject<List<Observable<Integer>>> subject = BehaviorSubject.create(Arrays.asList(
                Observable.<Integer>error(new Exception("1")),
                Observable.just(2),
                Observable.just(3)));
        LoadBalancer<Observable<Integer>> lb = LoadBalancer.fromSnapshotSource(subject).build(RoundRobinLoadBalancer.<Observable<Integer>>create(-1));
        final AtomicInteger lbCounter = new AtomicInteger();
        final AtomicReference<String> result = new AtomicReference<String>();
        lb .toObservable()
           .doOnNext(RxUtil.increment(lbCounter))
           .flatMap(Operation)
           .compose(strategy)
           .doOnNext(RxUtil.set(result))
           .subscribe();
        scheduler.advanceTimeBy(2, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        Assert.assertEquals("2", result.get());
        Assert.assertEquals(2, lbCounter.get());
    }
    /**
     * Both primary and backup delay 2s: the primary's (earlier-subscribed)
     * response is delivered once virtual time passes the delays.
     */
    @Test
    public void bothDelayed() {
        BehaviorSubject<List<Observable<Integer>>> subject = BehaviorSubject.create(Arrays.asList(
                Observable.just(1).delaySubscription(2, TimeUnit.SECONDS, scheduler),
                Observable.just(2).delaySubscription(2, TimeUnit.SECONDS, scheduler),
                Observable.just(3)));
        LoadBalancer<Observable<Integer>> lb = LoadBalancer.fromSnapshotSource(subject).build(RoundRobinLoadBalancer.<Observable<Integer>>create(-1));
        final AtomicInteger lbCounter = new AtomicInteger();
        final AtomicReference<String> result = new AtomicReference<String>();
        lb .toObservable()
           .doOnNext(RxUtil.increment(lbCounter))
           .flatMap(Operation)
           .compose(strategy)
           .doOnNext(RxUtil.set(result))
           .subscribe();
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(3, TimeUnit.SECONDS);
        Assert.assertEquals(2, lbCounter.get());
        Assert.assertEquals("1", result.get());
    }
    /** Primary and backup both fail: the composed stream terminates in error. */
    @Test
    public void bothFailed() {
        BehaviorSubject<List<Observable<Integer>>> subject = BehaviorSubject.create(Arrays.asList(
                Observable.<Integer>error(new Exception("1")),
                Observable.<Integer>error(new Exception("2")),
                Observable.just(3)));
        LoadBalancer<Observable<Integer>> lb = LoadBalancer.fromSnapshotSource(subject).build(RoundRobinLoadBalancer.<Observable<Integer>>create(-1));
        final AtomicInteger lbCounter = new AtomicInteger();
        final AtomicReference<String> result = new AtomicReference<String>();
        final AtomicBoolean failed = new AtomicBoolean(false);
        lb .toObservable()
           .doOnNext(RxUtil.increment(lbCounter))
           .flatMap(Operation)
           .compose(strategy)
           .doOnNext(RxUtil.set(result))
           .doOnError(new Action1<Throwable>() {
               @Override
               public void call(Throwable t1) {
                   failed.set(true);
               }
           })
           .subscribe();
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(3, TimeUnit.SECONDS);
        Assert.assertEquals(2, lbCounter.get());
        Assert.assertTrue(failed.get());
    }
    /**
     * Backup (host 2) fails after being issued, but the delayed primary still
     * completes successfully and its response is delivered.
     */
    @Test
    public void firstSucceedsSecondFailsAfterBackupStarted() {
        BehaviorSubject<List<Observable<Integer>>> subject = BehaviorSubject.create(Arrays.asList(
                Observable.just(1).delaySubscription(2, TimeUnit.SECONDS, scheduler),
                Observable.<Integer>error(new Exception("2")),
                Observable.just(3)));
        LoadBalancer<Observable<Integer>> lb = LoadBalancer.fromSnapshotSource(subject).build(RoundRobinLoadBalancer.<Observable<Integer>>create(-1));
        final AtomicInteger lbCounter = new AtomicInteger();
        final AtomicReference<String> result = new AtomicReference<String>();
        lb .toObservable()
           .doOnNext(RxUtil.increment(lbCounter))
           .flatMap(Operation)
           .compose(strategy)
           .doOnNext(RxUtil.set(result))
           .subscribe();
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        scheduler.advanceTimeBy(1, TimeUnit.SECONDS);
        Assert.assertEquals("1", result.get());
        Assert.assertEquals(2, lbCounter.get());
    }
}
| 6,172 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/util/CountDownAction.java | package netflix.ocelli.util;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import rx.functions.Action1;
/**
 * Rx {@code Action1} that captures every value it receives and counts down a
 * latch, letting a test thread block until an expected number of callbacks
 * have arrived. Value capture is thread-safe; {@link #reset(int)} is intended
 * for single-threaded test orchestration between waves of callbacks.
 */
public class CountDownAction<T> implements Action1<T> {
    private CountDownLatch remaining;
    private CopyOnWriteArrayList<T> captured = new CopyOnWriteArrayList<T>();

    public CountDownAction(int count) {
        remaining = new CountDownLatch(count);
    }

    @Override
    public void call(T value) {
        captured.add(value);
        remaining.countDown();
    }

    /** Block until the expected number of values arrived or the timeout elapses. */
    public void await(long timeout, TimeUnit units) throws Exception {
        remaining.await(timeout, units);
    }

    /** All values captured so far, in arrival order. */
    public List<T> get() {
        return captured;
    }

    /** Discard captured values and re-arm the latch for a new expected count. */
    public void reset(int count) {
        remaining = new CountDownLatch(count);
        captured = new CopyOnWriteArrayList<T>();
    }
}
| 6,173 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/util/RandomQueueTest.java | package netflix.ocelli.util;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.TimeUnit;
/**
 * Behavioral tests for {@code RandomBlockingQueue}: empty-queue semantics,
 * blocking polls, single-element add/remove, and randomized removal order.
 */
public class RandomQueueTest {

    @Test
    public void shouldBeInitiallyEmpty() {
        RandomBlockingQueue<Integer> q = new RandomBlockingQueue<Integer>();
        Assert.assertTrue(q.isEmpty());
        // peek() must not consume or create elements
        Assert.assertNull(q.peek());
        Assert.assertTrue(q.isEmpty());
        // remove() on an empty queue must throw
        try {
            q.remove();
            Assert.fail();
        } catch (NoSuchElementException expected) {
        }
    }

    @Test
    public void shouldBlockEmptyQueue() throws InterruptedException {
        RandomBlockingQueue<Integer> q = new RandomBlockingQueue<Integer>();
        // A timed poll on an empty queue returns null after the timeout
        Assert.assertNull(q.poll(100, TimeUnit.MILLISECONDS));
    }

    @Test
    @Ignore
    public void addRemoveAndShouldBlock() throws InterruptedException {
        RandomBlockingQueue<Integer> q = new RandomBlockingQueue<Integer>();
        q.add(123);
        // take() returns the single element, after which polls block again
        Assert.assertEquals((Integer) 123, q.take());
        Assert.assertNull(q.poll(100, TimeUnit.MILLISECONDS));
    }

    @Test
    public void addOne() {
        RandomBlockingQueue<Integer> q = new RandomBlockingQueue<Integer>();
        q.add(123);
        Assert.assertTrue(!q.isEmpty());
        Assert.assertEquals(1, q.size());
        Assert.assertEquals((Integer) 123, q.peek());
        Assert.assertEquals((Integer) 123, q.poll());
        // After draining, the queue behaves exactly like a fresh empty queue
        Assert.assertTrue(q.isEmpty());
        Assert.assertEquals(0, q.size());
        Assert.assertNull(q.peek());
        try {
            q.remove();
            Assert.fail();
        } catch (NoSuchElementException expected) {
        }
    }

    @Test(timeout = 1000)
    public void removeIsRandom() {
        RandomBlockingQueue<Integer> q = new RandomBlockingQueue<Integer>();
        List<Integer> expected = new ArrayList<Integer>();
        for (int n = 0; n < 100; n++) {
            expected.add(n);
            q.add(n);
        }
        // Drain completely; the drained order is expected to differ from
        // insertion order, but the drained contents must match exactly.
        List<Integer> drained = new ArrayList<Integer>();
        for (Integer next = q.poll(); next != null; next = q.poll()) {
            drained.add(next);
        }
        Assert.assertTrue(q.isEmpty());
        Assert.assertEquals(100, drained.size());
        Assert.assertNotSame(expected, drained);
        Collections.sort(drained);
        Assert.assertEquals(expected, drained);
    }
}
| 6,174 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/toplogies/TopologiesTest.java | package netflix.ocelli.toplogies;
import com.google.common.collect.Sets;
import netflix.ocelli.CloseableInstance;
import netflix.ocelli.Host;
import netflix.ocelli.Instance;
import netflix.ocelli.InstanceCollector;
import netflix.ocelli.functions.Functions;
import netflix.ocelli.topologies.RingTopology;
import netflix.ocelli.util.RxUtil;
import org.junit.Assert;
import org.junit.Test;
import rx.schedulers.TestScheduler;
import rx.subjects.PublishSubject;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
// Exercises RingTopology: with a local node id of 5 and a memoized neighbor
// count of 3, the topology should always expose the 3 ring-wise successors of
// id 5 among the currently registered members.
public class TopologiesTest {
    // Host subtype carrying an explicit integer id; unused by the test below
    // but kept as a fixture for id-based topologies.
    public static class HostWithId extends Host {
        private final Integer id;

        public HostWithId(String hostName, int port, Integer id) {
            super(hostName, port);
            this.id = id;
        }

        public Integer getId() {
            return this.id;
        }

        public String toString() {
            return id.toString();
        }
    }

    @Test
    public void test() {
        // Closeable members; closing one removes it from the topology. Note
        // there is intentionally no member 5 (the local id) and no m5 variable.
        CloseableInstance<Integer> m1 = CloseableInstance.from(1);
        CloseableInstance<Integer> m2 = CloseableInstance.from(2);
        CloseableInstance<Integer> m3 = CloseableInstance.from(3);
        CloseableInstance<Integer> m4 = CloseableInstance.from(4);
        CloseableInstance<Integer> m6 = CloseableInstance.from(6);
        CloseableInstance<Integer> m7 = CloseableInstance.from(7);
        CloseableInstance<Integer> m8 = CloseableInstance.from(8);
        CloseableInstance<Integer> m9 = CloseableInstance.from(9);
        CloseableInstance<Integer> m10 = CloseableInstance.from(10);
        CloseableInstance<Integer> m11 = CloseableInstance.from(11);

        PublishSubject<Instance<Integer>> members = PublishSubject.create();
        TestScheduler scheduler = new TestScheduler();
        // Local id 5, identity key function, fixed neighbor count of 3.
        RingTopology<Integer, Integer> mapper = RingTopology.create(5, Functions.identity(), Functions.memoize(3), scheduler);
        // Latest snapshot of the topology's selected members.
        AtomicReference<List<Integer>> current = new AtomicReference<List<Integer>>();
        members
            .doOnNext(RxUtil.info("add"))
            .compose(mapper)
            .compose(InstanceCollector.<Integer>create())
            .doOnNext(RxUtil.info("current"))
            .subscribe(RxUtil.set(current));

        // Members are added one at a time; after each add the selected set must
        // be the 3 ids that follow 5 around the ring (fewer while <3 exist).
        members.onNext(m11);
        Assert.assertEquals(Sets.newHashSet(11), Sets.newHashSet(current.get()));
        members.onNext(m7);
        Assert.assertEquals(Sets.newHashSet(7, 11), Sets.newHashSet(current.get()));
        members.onNext(m1);
        Assert.assertEquals(Sets.newHashSet(1, 7, 11), Sets.newHashSet(current.get()));
        // Ids 2, 4 and 3 sit "behind" the selected successors and do not
        // displace any of {1, 7, 11}.
        members.onNext(m2);
        Assert.assertEquals(Sets.newHashSet(1, 7, 11), Sets.newHashSet(current.get()));
        members.onNext(m4);
        Assert.assertEquals(Sets.newHashSet(1, 7, 11), Sets.newHashSet(current.get()));
        members.onNext(m3);
        Assert.assertEquals(Sets.newHashSet(1, 7, 11), Sets.newHashSet(current.get()));
        // Closer successors displace farther ones as they arrive.
        members.onNext(m8);
        Assert.assertEquals(Sets.newHashSet(7, 8, 11), Sets.newHashSet(current.get()));
        members.onNext(m10);
        Assert.assertEquals(Sets.newHashSet(7, 8, 10), Sets.newHashSet(current.get()));
        members.onNext(m9);
        Assert.assertEquals(Sets.newHashSet(7, 8, 9), Sets.newHashSet(current.get()));
        members.onNext(m6);
        Assert.assertEquals(Sets.newHashSet(6, 7, 8), Sets.newHashSet(current.get()));
        // Removing a selected member promotes the next ring successor.
        m6.close();
        Assert.assertEquals(Sets.newHashSet(7, 8, 9), Sets.newHashSet(current.get()));
        m9.close();
        Assert.assertEquals(Sets.newHashSet(7, 8, 10), Sets.newHashSet(current.get()));
        m8.close();
        Assert.assertEquals(Sets.newHashSet(7, 10, 11), Sets.newHashSet(current.get()));
    }
}
| 6,175 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer/ChoiceOfTwoLoadBalancerTest.java | package netflix.ocelli.loadbalancer;
import com.google.common.collect.Lists;
import netflix.ocelli.LoadBalancer;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import rx.subjects.BehaviorSubject;
import java.util.Comparator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicIntegerArray;
/**
 * Tests for {@code ChoiceOfTwoLoadBalancer}: picks two random candidates and
 * returns the one ranked higher by the comparator ("power of two choices").
 */
public class ChoiceOfTwoLoadBalancerTest {
    @Rule
    public TestName name = new TestName();

    /**
     * Natural integer ordering; a higher value wins the two-way comparison.
     * Uses Integer.compare instead of subtraction to avoid the int-overflow
     * comparator anti-pattern.
     */
    private static final Comparator<Integer> COMPARATOR = new Comparator<Integer>() {
        @Override
        public int compare(Integer o1, Integer o2) {
            return Integer.compare(o1, o2);
        }
    };

    /** An empty candidate pool must fail fast on next(). */
    @Test(expected = NoSuchElementException.class)
    public void testEmpty() {
        BehaviorSubject<List<Integer>> source = BehaviorSubject.create();
        LoadBalancer<Integer> lb = LoadBalancer.fromSnapshotSource(source).build(ChoiceOfTwoLoadBalancer.create(COMPARATOR));
        source.onNext(Lists.<Integer>newArrayList());
        lb.next();
    }

    /** A single candidate is always returned. */
    @Test
    public void testOne() {
        BehaviorSubject<List<Integer>> source = BehaviorSubject.create();
        LoadBalancer<Integer> lb = LoadBalancer.fromSnapshotSource(source).build(ChoiceOfTwoLoadBalancer.create(COMPARATOR));
        source.onNext(Lists.newArrayList(0));
        for (int i = 0; i < 100; i++) {
            Assert.assertEquals(0, (int) lb.next());
        }
    }

    /**
     * With exactly two candidates both are always compared, so the
     * higher-ranked one (1) wins every single time.
     */
    @Test
    public void testTwo() {
        BehaviorSubject<List<Integer>> source = BehaviorSubject.create();
        LoadBalancer<Integer> lb = LoadBalancer.fromSnapshotSource(source).build(ChoiceOfTwoLoadBalancer.create(COMPARATOR));
        source.onNext(Lists.newArrayList(0, 1));
        AtomicIntegerArray counts = new AtomicIntegerArray(2);
        for (int i = 0; i < 100; i++) {
            counts.incrementAndGet(lb.next());
        }
        Assert.assertEquals(counts.get(0), 0);
        Assert.assertEquals(counts.get(1), 100);
    }

    /**
     * With many candidates the selection distribution must be strictly
     * increasing in rank: a higher-ranked client is chosen more often.
     */
    @Test
    public void testMany() {
        BehaviorSubject<List<Integer>> source = BehaviorSubject.create();
        LoadBalancer<Integer> lb = LoadBalancer.fromSnapshotSource(source).build(ChoiceOfTwoLoadBalancer.create(COMPARATOR));
        source.onNext(Lists.newArrayList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
        AtomicIntegerArray counts = new AtomicIntegerArray(10);
        for (int i = 0; i < 100000; i++) {
            counts.incrementAndGet(lb.next());
        }
        // (Removed dead code: a percentage array was computed here but never
        // read or asserted on.)
        for (int i = 1; i < counts.length(); i++) {
            Assert.assertTrue(counts.get(i) > counts.get(i - 1));
        }
    }
}
| 6,176 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer/weighting/InverseMaxWeightingStrategyTest.java | package netflix.ocelli.loadbalancer.weighting;
import com.google.common.collect.Lists;
import netflix.ocelli.LoadBalancer;
import netflix.ocelli.loadbalancer.RandomWeightedLoadBalancer;
import netflix.ocelli.retry.RetryFailedTestRule;
import netflix.ocelli.retry.RetryFailedTestRule.Retry;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import rx.subjects.BehaviorSubject;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
/**
 * Distribution tests for {@code InverseMaxWeightingStrategy}: a client's
 * selection weight is inversely proportional to its metric, so the client with
 * the smallest metric receives the most traffic. Flaky-by-nature randomized
 * checks are retried up to 5 times via {@link RetryFailedTestRule}.
 */
public class InverseMaxWeightingStrategyTest extends BaseWeightingStrategyTest {
    @Rule
    public RetryFailedTestRule retryRule = new RetryFailedTestRule();

    BehaviorSubject<List<IntClientAndMetrics>> subject = BehaviorSubject.create();
    LoadBalancer<IntClientAndMetrics> lb = LoadBalancer.fromSnapshotSource(subject).build(RandomWeightedLoadBalancer.create(
            new InverseMaxWeightingStrategy<IntClientAndMetrics>(IntClientAndMetrics.BY_METRIC)));

    @Test(expected = NoSuchElementException.class)
    public void testEmptyClients() throws Throwable {
        List<IntClientAndMetrics> pool = create();
        subject.onNext(pool);
        // simulate() rethrows NoSuchElementException for an empty pool
        Integer[] raw = simulate(lb, pool.size(), 1000);
        Assert.assertEquals(Lists.newArrayList(), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }

    @Test
    @Retry(5)
    public void testOneClient() throws Throwable {
        List<IntClientAndMetrics> pool = create(10);
        subject.onNext(pool);
        // A single client receives every request
        Integer[] raw = simulate(lb, pool.size(), 1000);
        Assert.assertEquals(Lists.newArrayList(1000), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }

    @Test
    @Retry(5)
    public void testEqualsWeights() throws Throwable {
        List<IntClientAndMetrics> pool = create(1, 1, 1, 1);
        subject.onNext(pool);
        // Equal metrics -> uniform distribution
        Integer[] raw = simulate(lb, pool.size(), 4000);
        Assert.assertEquals(Lists.newArrayList(1000, 1000, 1000, 1000), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }

    @Test
    @Retry(5)
    public void testDifferentWeights() throws Throwable {
        List<IntClientAndMetrics> pool = create(1, 2, 3, 4);
        subject.onNext(pool);
        // Inverse weighting: the lowest metric gets the largest share
        Integer[] raw = simulate(lb, pool.size(), 4000);
        Assert.assertEquals(Lists.newArrayList(1600, 1200, 800, 400), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }
}
| 6,177 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer/weighting/LinearWeightingStrategyTest.java | package netflix.ocelli.loadbalancer.weighting;
import com.google.common.collect.Lists;
import netflix.ocelli.LoadBalancer;
import netflix.ocelli.loadbalancer.RandomWeightedLoadBalancer;
import netflix.ocelli.retry.RetryFailedTestRule;
import netflix.ocelli.retry.RetryFailedTestRule.Retry;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import rx.subjects.BehaviorSubject;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
/**
 * Distribution tests for {@code LinearWeightingStrategy}: a client's selection
 * weight is directly proportional to its metric, so the client with the
 * largest metric receives the most traffic. Randomized checks are retried up
 * to 5 times via {@link RetryFailedTestRule}.
 */
public class LinearWeightingStrategyTest extends BaseWeightingStrategyTest {
    @Rule
    public RetryFailedTestRule retryRule = new RetryFailedTestRule();

    BehaviorSubject<List<IntClientAndMetrics>> subject = BehaviorSubject.create();
    LoadBalancer<IntClientAndMetrics> lb = LoadBalancer.fromSnapshotSource(subject).build(RandomWeightedLoadBalancer.create(
            new LinearWeightingStrategy<IntClientAndMetrics>(IntClientAndMetrics.BY_METRIC)));

    @Test(expected = NoSuchElementException.class)
    public void testEmptyClients() throws Throwable {
        List<IntClientAndMetrics> pool = create();
        subject.onNext(pool);
        // simulate() rethrows NoSuchElementException for an empty pool
        Integer[] raw = simulate(lb, pool.size(), 1000);
        Assert.assertEquals(Lists.newArrayList(), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }

    @Test
    @Retry(5)
    public void testOneClient() throws Throwable {
        List<IntClientAndMetrics> pool = create(10);
        subject.onNext(pool);
        // A single client receives every request
        Integer[] raw = simulate(lb, pool.size(), 1000);
        Assert.assertEquals(Lists.newArrayList(1000), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }

    @Test
    @Retry(5)
    public void testEqualsWeights() throws Throwable {
        List<IntClientAndMetrics> pool = create(1, 1, 1, 1);
        subject.onNext(pool);
        // Equal metrics -> uniform distribution
        Integer[] raw = simulate(lb, pool.size(), 4000);
        Assert.assertEquals(Lists.newArrayList(1000, 1000, 1000, 1000), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }

    @Test
    @Retry(5)
    public void testDifferentWeights() throws Throwable {
        List<IntClientAndMetrics> pool = create(1, 2, 3, 4);
        subject.onNext(pool);
        // Linear weighting: the highest metric gets the largest share
        Integer[] raw = simulate(lb, pool.size(), 4000);
        Assert.assertEquals(Lists.newArrayList(400, 800, 1200, 1600), Arrays.<Integer>asList(roundToNearest(raw, 100)));
    }
}
| 6,178 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer/weighting/IntClientAndMetrics.java | package netflix.ocelli.loadbalancer.weighting;
import rx.functions.Func1;
/**
 * Immutable test fixture pairing an integer client id with an integer metric,
 * used by the weighting-strategy tests. The client id doubles as an index into
 * the count arrays produced by {@code BaseWeightingStrategyTest.simulate}.
 */
public class IntClientAndMetrics {
    // Fields made final: this fixture is conceptually immutable and was only
    // ever assigned in the constructor.
    private final Integer client;
    private final Integer metrics;

    public IntClientAndMetrics(int client, int metrics) {
        this.client = client;
        this.metrics = metrics;
    }

    public Integer getClient() {
        return client;
    }

    public Integer getMetrics() {
        return metrics;
    }

    /**
     * Extracts the metric from a fixture; handed to weighting strategies.
     * Made final so the shared constant cannot be reassigned by a test.
     */
    public static final Func1<IntClientAndMetrics, Integer> BY_METRIC = new Func1<IntClientAndMetrics, Integer>() {
        @Override
        public Integer call(IntClientAndMetrics t1) {
            return t1.getMetrics();
        }
    };

    @Override
    public String toString() {
        return "IntClientAndMetrics [client=" + client + ", metrics=" + metrics + "]";
    }
}
| 6,179 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/loadbalancer/weighting/BaseWeightingStrategyTest.java | package netflix.ocelli.loadbalancer.weighting;
import java.util.ArrayList;
import java.util.List;
import netflix.ocelli.LoadBalancer;
import org.junit.Ignore;
import rx.exceptions.OnErrorNotImplementedException;
import rx.functions.Action1;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
@Ignore
// Shared helpers for the weighting-strategy distribution tests; @Ignore'd so
// JUnit does not attempt to run this base class directly.
public class BaseWeightingStrategyTest {
    /**
     * Creates one client per weight. Client ids are assigned sequentially
     * starting at 0 so they can index directly into the counts array produced
     * by {@link #simulate}.
     *
     * @param weights metric value for each generated client, in id order
     * @return clients paired with their metrics
     */
    static List<IntClientAndMetrics> create(Integer... weights) {
        List<IntClientAndMetrics> cam = new ArrayList<IntClientAndMetrics>(weights.length);
        int counter = 0;
        for (int i = 0; i < weights.length; i++) {
            cam.add(new IntClientAndMetrics(counter++, weights[i]));
        }
        return cam;
    }

    /**
     * Flattens the computed weights into an array whose indexes match the
     * positions of the clients in {@code caw}.
     *
     * @param caw clients with their computed weights
     * @return one weight per client, by position
     */
    static int[] getWeights(ClientsAndWeights<IntClientAndMetrics> caw) {
        int[] weights = new int[caw.size()];
        for (int i = 0; i < caw.size(); i++) {
            weights[i] = caw.getWeight(i);
        }
        return weights;
    }

    /**
     * Performs {@code count} selections against the load balancer and tallies
     * how often each client id was chosen.
     *
     * @param lb load balancer under test
     * @param N number of distinct clients (sizes the result array)
     * @param count number of selections to perform
     * @return per-client-id selection counts, indexed by client id
     * @throws Throwable the underlying cause when a selection fails (e.g.
     *         NoSuchElementException for an empty pool), unwrapped from Rx's
     *         OnErrorNotImplementedException
     */
    static Integer[] simulate(LoadBalancer<IntClientAndMetrics> lb, int N, int count) throws Throwable {
        // Set up array of counts
        final Integer[] counts = new Integer[N];
        for (int i = 0; i < N; i++) {
            counts[i] = 0;
        }
        // Run simulation: each subscription performs one selection and bumps
        // the chosen client's tally.
        for (int i = 0; i < count; i++) {
            try {
                lb.toObservable().subscribe(new Action1<IntClientAndMetrics>() {
                    @Override
                    public void call(IntClientAndMetrics t1) {
                        counts[t1.getClient()] = counts[t1.getClient()] + 1;
                    }
                });
            }
            catch (OnErrorNotImplementedException e) {
                // subscribe() had no error handler; surface the real failure
                throw e.getCause();
            }
        }
        return counts;
    }

    /**
     * Rounds each count to the nearest multiple of {@code amount}, in place,
     * to make randomized distributions comparable against exact expectations.
     */
    static Integer[] roundToNearest(Integer[] counts, int amount) {
        int middle = amount / 2;
        for (int i = 0; i < counts.length; i++) {
            counts[i] = amount * ((counts[i] + middle) / amount);
        }
        return counts;
    }

    /** Comma-joined client ids, for diagnostic output. */
    static String printClients(IntClientAndMetrics[] clients) {
        return Joiner.on(", ").join(Collections2.transform(Lists.newArrayList(clients), new Function<IntClientAndMetrics, Integer>() {
            @Override
            public Integer apply(IntClientAndMetrics arg0) {
                return arg0.getClient();
            }
        }));
    }

    /** Comma-joined client metrics, for diagnostic output. */
    static String printMetrics(IntClientAndMetrics[] clients) {
        return Joiner.on(", ").join(Collections2.transform(Lists.newArrayList(clients), new Function<IntClientAndMetrics, Integer>() {
            @Override
            public Integer apply(IntClientAndMetrics arg0) {
                return arg0.getMetrics();
            }
        }));
    }
}
| 6,180 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/functions/GuardsTest.java | package netflix.ocelli.functions;
import org.junit.Test;
import rx.functions.Func1;
/**
 * Exploratory tests for {@code Limiters.exponential}. These only print how
 * many calls were rejected under different success/failure mixes; there are
 * no assertions.
 */
public class GuardsTest {
    @Test
    public void shouldRejectAfter100Percent() {
        Func1<Boolean, Boolean> limiter = Limiters.exponential(0.90, 30);
        int rejected = 0;
        // Every iteration reports one success followed by one failure
        for (int i = 0; i < 100; i++) {
            limiter.call(true);
            if (!limiter.call(false)) {
                rejected++;
            }
        }
        System.out.println("Discarded : " + rejected);
    }

    @Test
    public void shouldRejectAfter90Percent() {
        Func1<Boolean, Boolean> limiter = Limiters.exponential(0.90, 30);
        int rejected = 0;
        // Failures are reported on only every 5th iteration
        for (int i = 0; i < 100; i++) {
            limiter.call(true);
            if (i % 5 == 0) {
                if (!limiter.call(false)) {
                    rejected++;
                }
            }
        }
        System.out.println("Discarded : " + rejected);
    }
}
| 6,181 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/perf/PerfTest.java | package netflix.ocelli.perf;
import java.util.ArrayList;
import java.util.List;
import netflix.ocelli.LoadBalancer;
import netflix.ocelli.client.Behaviors;
import netflix.ocelli.client.Connects;
import netflix.ocelli.client.TestClient;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manual load-balancer throughput experiments. Both scenarios are
 * {@code @Ignore}d: their bodies referenced an older
 * ClientLifecycleFactory/RoundRobinLoadBalancer API and had been entirely
 * commented out when that API changed. The dead commented-out code has been
 * removed; re-implement against the current {@code LoadBalancer} API before
 * re-enabling.
 */
public class PerfTest {
    private static final Logger LOG = LoggerFactory.getLogger(PerfTest.class);

    private static final int NUM_HOSTS = 1000;

    // Populated by the experiments once they are re-implemented.
    private LoadBalancer<TestClient> selector;

    @BeforeClass
    public static void setup() {
        // NUM_HOSTS-1: the original experiment reserved the last slot for a
        // "degrading" host variant (since removed).
        List<TestClient> hosts = new ArrayList<TestClient>();
        for (int i = 0; i < NUM_HOSTS - 1; i++) {
            hosts.add(TestClient.create("host-" + i, Connects.immediate(), Behaviors.immediate()));
        }
    }

    @Test
    @Ignore
    public void perf() throws InterruptedException {
        // TODO: drive 10 threads issuing a request every 100ms through the LB
        // and log each response (see git history for the original sketch).
    }

    @Test
    @Ignore
    public void perf2() throws InterruptedException {
        // TODO: drive 400 threads issuing a request every 10ms and report the
        // per-second message rate via LOG (see git history for the original
        // sketch).
    }
}
| 6,182 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/Operations.java | package netflix.ocelli.client;
import rx.Observable;
import rx.functions.Func1;
import java.net.SocketTimeoutException;
import java.util.concurrent.TimeUnit;
/**
 * Factory of canned operations (functions from a {@link TestClient} to a
 * response stream) used to script client responses in load-balancer tests.
 */
public class Operations {
    /** Operation that emits {@code "<client>-ok"} once after a fixed delay. */
    public static Func1<TestClient, Observable<String>> delayed(final long duration, final TimeUnit units) {
        return new Func1<TestClient, Observable<String>>() {
            @Override
            public Observable<String> call(final TestClient server) {
                Func1<Long, String> toResponse = new Func1<Long, String>() {
                    @Override
                    public String call(Long tick) {
                        return server + "-ok";
                    }
                };
                return Observable
                        .interval(duration, units)
                        .first()
                        .map(toResponse);
            }
        };
    }

    /** Operation that fails with a SocketTimeoutException after the delay. */
    public static Func1<TestClient, Observable<String>> timeout(final long duration, final TimeUnit units) {
        return new Func1<TestClient, Observable<String>>() {
            @Override
            public Observable<String> call(final TestClient server) {
                Func1<Long, Observable<String>> toError = new Func1<Long, Observable<String>>() {
                    @Override
                    public Observable<String> call(Long tick) {
                        return Observable.error(new SocketTimeoutException("Timeout"));
                    }
                };
                return Observable
                        .interval(duration, units)
                        .flatMap(toError);
            }
        };
    }

    /** Operation that records which clients it was routed to. */
    public static TrackingOperation tracking(String response) {
        return new TrackingOperation(response);
    }
}
| 6,183 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/Connects.java | package netflix.ocelli.client;
import rx.Observable;
import rx.functions.Func1;
import java.util.concurrent.TimeUnit;
/**
 * Factory of canned connection outcomes ({@code Observable<Void>} streams)
 * used to script {@link TestClient} connect behavior in tests.
 */
public class Connects {
    /** Connects successfully after the given delay. */
    public static Observable<Void> delay(final long duration, final TimeUnit units) {
        return Observable.timer(duration, units).ignoreElements().cast(Void.class);
    }

    /** Fails immediately. */
    public static Observable<Void> failure() {
        return Observable.error(new Exception("Connectus interruptus"));
    }

    /** Fails after the given delay. */
    public static Observable<Void> failure(final long duration, final TimeUnit units) {
        Func1<Long, Observable<Void>> toError = new Func1<Long, Observable<Void>>() {
            @Override
            public Observable<Void> call(Long tick) {
                return Observable.error(new Exception("Connectus interruptus"));
            }
        };
        return Observable.timer(duration, units).concatMap(toError);
    }

    /** Connects successfully and immediately. */
    public static Observable<Void> immediate() {
        return Observable.empty();
    }

    /** Never completes nor fails. */
    public static Observable<Void> never() {
        return Observable.never();
    }

    /** Fails immediately with the supplied exception. */
    public static Observable<Void> error(Exception e) {
        return Observable.error(e);
    }
}
| 6,184 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/TestClient.java | package netflix.ocelli.client;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicLong;
import netflix.ocelli.util.RxUtil;
import rx.Observable;
import rx.Observer;
import rx.functions.Func1;
import rx.functions.Func2;
/**
 * Scriptable fake client for load-balancer tests. Latency/failure behavior is
 * injected as a function, and every lifecycle event (subscribe, next, error,
 * complete, unsubscribe) is counted so tests can assert on how the LB
 * exercised the client. Identity (equals/hashCode) is based solely on id.
 */
public class TestClient {
    private final String id;
    private final Func1<TestClient, Observable<TestClient>> behavior;
    private final Observable<Void> connect;
    private final int concurrency = 10;
    // Available permits model spare capacity; read by byPendingRequestCount().
    private final Semaphore sem = new Semaphore(concurrency);
    private final Set<String> vips = new HashSet<String>();
    private String rack;

    /** Keys a client by each of its vips plus the wildcard "*". */
    public static Func1<TestClient, Observable<String>> byVip() {
        return new Func1<TestClient, Observable<String>>() {
            @Override
            public Observable<String> call(TestClient t1) {
                return Observable.from(t1.vips).concatWith(Observable.just("*"));
            }
        };
    }

    /** Keys a client by its rack. */
    public static Func1<TestClient, Observable<String>> byRack() {
        return new Func1<TestClient, Observable<String>>() {
            @Override
            public Observable<String> call(TestClient t1) {
                return Observable.just(t1.rack);
            }
        };
    }

    /** Spare capacity metric (available semaphore permits). */
    public static Func1<TestClient, Integer> byPendingRequestCount() {
        return new Func1<TestClient, Integer>() {
            @Override
            public Integer call(TestClient t1) {
                return t1.sem.availablePermits();
            }
        };
    }

    public static TestClient create(String id, Observable<Void> connect, Func1<TestClient, Observable<TestClient>> behavior) {
        return new TestClient(id, connect, behavior);
    }

    /** Create a client that connects immediately. */
    public static TestClient create(String id, Func1<TestClient, Observable<TestClient>> behavior) {
        return new TestClient(id, Connects.immediate(), behavior);
    }

    /** Request function that executes on the client and answers its id. */
    public static Func2<TestClient, String, Observable<String>> func() {
        return new Func2<TestClient, String, Observable<String>>() {
            @Override
            public Observable<String> call(TestClient client, String request) {
                return client.execute(new Func1<TestClient, Observable<String>>() {
                    @Override
                    public Observable<String> call(TestClient t1) {
                        return Observable.just(t1.id());
                    }
                });
            }
        };
    }

    public TestClient(String id, Observable<Void> connect, Func1<TestClient, Observable<TestClient>> behavior) {
        this.id = id;
        this.behavior = behavior;
        this.connect = connect;
    }

    public Observable<Void> connect() {
        return connect;
    }

    public TestClient withVip(String vip) {
        this.vips.add(vip);
        return this;
    }

    public TestClient withRack(String rack) {
        this.rack = rack;
        return this;
    }

    public Set<String> vips() {
        return this.vips;
    }

    public String rack() {
        return this.rack;
    }

    public String id() {
        return this.id;
    }

    // Lifecycle counters (made final; they were only ever assigned inline).
    private final AtomicLong executeCount = new AtomicLong(0);
    private final AtomicLong onNextCount = new AtomicLong(0);
    private final AtomicLong onCompletedCount = new AtomicLong(0);
    private final AtomicLong onSubscribeCount = new AtomicLong(0);
    private final AtomicLong onUnSubscribeCount = new AtomicLong(0);
    private final AtomicLong onErrorCount = new AtomicLong(0);

    public long getExecuteCount() {
        return executeCount.get();
    }

    public long getOnNextCount() {
        return onNextCount.get();
    }

    public long getOnCompletedCount() {
        return onCompletedCount.get();
    }

    public long getOnErrorCount() {
        return onErrorCount.get();
    }

    public long getOnSubscribeCount() {
        return onSubscribeCount.get();
    }

    public long getOnUnSubscribeCount() {
        return onUnSubscribeCount.get();
    }

    public boolean hasError() {
        return onErrorCount.get() > 0;
    }

    /**
     * Runs {@code operation} against this client through the scripted
     * behavior, acquiring one capacity permit for the duration of the call
     * and tallying every lifecycle event.
     */
    public Observable<String> execute(Func1<TestClient, Observable<String>> operation) {
        this.executeCount.incrementAndGet();
        return behavior.call(this)
                .doOnSubscribe(RxUtil.increment(onSubscribeCount))
                .doOnSubscribe(RxUtil.acquire(sem))
                .doOnUnsubscribe(RxUtil.increment(onUnSubscribeCount))
                .concatMap(operation)
                .doOnEach(new Observer<String>() {
                    @Override
                    public void onCompleted() {
                        onCompletedCount.incrementAndGet();
                        sem.release();
                    }

                    @Override
                    public void onError(Throwable e) {
                        onErrorCount.incrementAndGet();
                        // BUG FIX: the permit acquired on subscribe was never
                        // released on error, so 10 failed executions exhausted
                        // the client's capacity permanently.
                        sem.release();
                        // NOTE(review): an unsubscribe without a terminal event
                        // still leaks a permit — TODO confirm whether any test
                        // relies on that.
                    }

                    @Override
                    public void onNext(String t) {
                        onNextCount.incrementAndGet();
                    }
                });
    }

    public String toString() {
        return "Host[id=" + id + "]";
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        TestClient other = (TestClient) obj;
        if (id == null) {
            if (other.id != null)
                return false;
        } else if (!id.equals(other.id))
            return false;
        return true;
    }
}
| 6,185 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/Behaviors.java | package netflix.ocelli.client;
import netflix.ocelli.util.RxUtil;
import rx.Observable;
import rx.Scheduler;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
public class Behaviors {
public static Func1<TestClient, Observable<TestClient>> delay(final long amount, final TimeUnit units) {
return delay(amount, units, Schedulers.computation());
}
public static Func1<TestClient, Observable<TestClient>> delay(final long amount, final TimeUnit units, final Scheduler scheduler) {
return new Func1<TestClient, Observable<TestClient>>() {
@Override
public Observable<TestClient> call(final TestClient client) {
return Observable
.just(client)
.delay(amount, units, scheduler)
;
}
};
}
public static Func1<TestClient, Observable<TestClient>> immediate() {
return new Func1<TestClient, Observable<TestClient>>() {
@Override
public Observable<TestClient> call(TestClient client) {
return Observable
.just(client);
}
};
}
public static Func1<TestClient, Observable<TestClient>> failure(final long amount, final TimeUnit units) {
return failure(amount, units, Schedulers.computation());
}
public static Func1<TestClient, Observable<TestClient>> failure(final long amount, final TimeUnit units, final Scheduler scheduler) {
return new Func1<TestClient, Observable<TestClient>>() {
@Override
public Observable<TestClient> call(TestClient client) {
return Observable.timer(amount, units, scheduler)
.flatMap(new Func1<Long, Observable<TestClient>>() {
@Override
public Observable<TestClient> call(Long t1) {
return Observable.error(new Exception("SimulatedErrorBehavior"));
}
});
}
};
}
public static Func1<TestClient, Observable<TestClient>> failFirst(final int num) {
return new Func1<TestClient, Observable<TestClient>>() {
private int counter;
@Override
public Observable<TestClient> call(TestClient client) {
if (counter++ < num) {
return Observable.error(new Exception("Failure-" + counter));
}
return Observable.just(client);
}
};
}
public static Func1<TestClient, Observable<TestClient>> failure() {
return new Func1<TestClient, Observable<TestClient>>() {
@Override
public Observable<TestClient> call(TestClient client) {
return Observable
.just(client)
.concatWith(Observable.<TestClient>error(new Exception("SimulatedErrorBehavior")));
}
};
}
public static Func1<TestClient, Observable<TestClient>> degradation(final long initial, final long step, final TimeUnit units) {
return new Func1<TestClient, Observable<TestClient>>() {
private AtomicLong counter = new AtomicLong(0);
@Override
public Observable<TestClient> call(TestClient client) {
return Observable
.just(client)
.delay(initial + counter.incrementAndGet() + step, units);
}
};
}
public static Func1<TestClient, Observable<TestClient>> proportionalToLoad(final long baseline, final long step, final TimeUnit units) {
return new Func1<TestClient, Observable<TestClient>>() {
private AtomicLong counter = new AtomicLong(0);
@Override
public Observable<TestClient> call(TestClient client) {
final long count = counter.incrementAndGet();
return Observable
.just(client)
.delay(baseline + count + step, units)
.finallyDo(RxUtil.decrement(counter));
}
};
}
/**
 * Behavior that completes immediately without emitting the client at all.
 */
public static Func1<TestClient, Observable<TestClient>> empty() {
    return new Func1<TestClient, Observable<TestClient>>() {
        @Override
        public Observable<TestClient> call(TestClient client) {
            return Observable.empty();
        }
    };
}
// public static poissonDelay()
// public static gaussianDelay();
// public static gcPauses();
}
| 6,186 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/TrackingOperation.java | package netflix.ocelli.client;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import java.util.ArrayList;
import java.util.List;
/**
 * Test operation that records every client it is executed on and always
 * responds with a fixed string.
 */
public class TrackingOperation implements Func1<TestClient, Observable<String>> {
    private static final Logger LOG = LoggerFactory.getLogger(TrackingOperation.class);

    /** Clients this operation has been invoked on, in invocation order. */
    private final List<TestClient> servers = new ArrayList<TestClient>();
    private final String response;

    public TrackingOperation(String response) {
        this.response = response;
    }

    @Override
    public Observable<String> call(final TestClient client) {
        servers.add(client);
        final Func1<TestClient, Observable<String>> reply = new Func1<TestClient, Observable<String>>() {
            @Override
            public Observable<String> call(TestClient target) {
                return Observable.just(response);
            }
        };
        return client.execute(reply);
    }

    public List<TestClient> getServers() {
        return servers;
    }
}
| 6,187 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/ManualFailureDetector.java | package netflix.ocelli.client;
import java.util.concurrent.ConcurrentMap;
import rx.Observable;
import rx.functions.Func1;
import rx.subjects.PublishSubject;
import com.google.common.collect.Maps;
/**
 * Failure detector for tests: exposes one {@link PublishSubject} per client so a
 * test can inject failures manually via {@link #get(TestClient)}.
 */
public class ManualFailureDetector implements Func1<TestClient, Observable<Throwable>> {
    /** One failure subject per client, created lazily on first lookup. */
    private final ConcurrentMap<TestClient, PublishSubject<Throwable>> clients = Maps.newConcurrentMap();

    @Override
    public Observable<Throwable> call(TestClient client) {
        final PublishSubject<Throwable> fresh = PublishSubject.create();
        final PublishSubject<Throwable> existing = clients.putIfAbsent(client, fresh);
        return existing == null ? fresh : existing;
    }

    public PublishSubject<Throwable> get(TestClient client) {
        return clients.get(client);
    }
}
| 6,188 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/ResponseObserver.java | package netflix.ocelli.client;
import rx.Observer;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Test observer that captures the last emitted response (or the terminal error) and
 * lets a test block until the stream terminates.
 */
public class ResponseObserver implements Observer<String> {
    private volatile Throwable t;
    private volatile String response;
    private final CountDownLatch latch = new CountDownLatch(1);

    @Override
    public void onCompleted() {
        latch.countDown();
    }

    @Override
    public void onError(Throwable e) {
        this.t = e;
        latch.countDown();
    }

    @Override
    public void onNext(String t) {
        this.response = t;
    }

    /**
     * Block until the stream terminates, then return the last response.
     *
     * @throws TimeoutException if no terminal event arrives within the timeout
     *         (the original silently returned a possibly-null response here)
     * @throws Throwable the stream's error, if it terminated with onError
     */
    public String await(long duration, TimeUnit units) throws Throwable {
        if (!latch.await(duration, units)) {
            throw new TimeoutException("No terminal event within " + duration + " " + units);
        }
        if (this.t != null)
            throw this.t;
        return response;
    }

    /** Last response seen so far; null if nothing has been emitted. */
    public String get() {
        return response;
    }
}
| 6,189 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/TestClientConnector.java | package netflix.ocelli.client;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import rx.subjects.PublishSubject;
/**
 * OnSubscribe implementation used by tests: completes each subscriber immediately
 * and, as a side effect of every subscription, publishes the wrapped client onto a
 * shared hot stream observable via {@link #stream()}.
 */
public class TestClientConnector implements OnSubscribe<Void> {
    // Hot stream of clients; receives one emission per subscription to this OnSubscribe.
    private final PublishSubject<TestClient> stream = PublishSubject.create();
    private final TestClient client;

    public TestClientConnector(TestClient client) {
        this.client = client;
    }

    @Override
    public void call(Subscriber<? super Void> s) {
        // NOTE(review): the subscriber is completed BEFORE the client is pushed onto
        // the stream, so anything chained to onCompleted runs first. This ordering
        // looks deliberate for the tests — confirm before reordering.
        s.onCompleted();
        stream.onNext(client);
    }

    public Observable<TestClient> stream() {
        return stream;
    }
}
| 6,190 |
0 | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli | Create_ds/ocelli/ocelli-core/src/test/java/netflix/ocelli/client/TestClientConnectorFactory.java | package netflix.ocelli.client;
import java.util.concurrent.ConcurrentMap;
import rx.Observable;
import rx.functions.Func1;
import com.google.common.collect.Maps;
/**
 * Factory that hands out at most one {@link TestClientConnector} per client and
 * adapts it to the {@code Func1<TestClient, Observable<Void>>} connector contract.
 */
public class TestClientConnectorFactory implements Func1<TestClient, Observable<Void>> {
    /** Lazily-created connector per client instance. */
    private final ConcurrentMap<TestClient, TestClientConnector> connectors = Maps.newConcurrentMap();

    @Override
    public Observable<Void> call(TestClient client) {
        return Observable.create(get(client));
    }

    public TestClientConnector get(TestClient client) {
        final TestClientConnector fresh = new TestClientConnector(client);
        final TestClientConnector existing = connectors.putIfAbsent(client, fresh);
        return existing != null ? existing : fresh;
    }
}
| 6,191 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/InstanceToNotification.java | package netflix.ocelli;
import netflix.ocelli.InstanceToNotification.InstanceNotification;
import rx.Notification;
import rx.Observable;
import rx.functions.Func1;
/**
 * Converts an {@link Instance} into a stream of add/remove notifications: an OnAdd is
 * emitted immediately and a single OnRemove is emitted when the instance's lifecycle
 * terminates (completes or errors).
 */
public class InstanceToNotification<T> implements Func1<Instance<T>, Observable<InstanceNotification<T>>> {
    public static <T> InstanceToNotification<T> create() {
        return new InstanceToNotification<T>();
    }

    /** Discriminates whether an instance just joined or just left. */
    public enum Kind {
        OnAdd,
        OnRemove
    }

    /** Immutable pairing of an instance with the kind of membership change. */
    public static class InstanceNotification<T> {
        private final Instance<T> value;
        private final Kind kind;

        public InstanceNotification(Instance<T> instance, Kind kind) {
            this.value = instance;
            this.kind = kind;
        }

        public Kind getKind() {
            return kind;
        }

        public Instance<T> getInstance() {
            return value;
        }

        @Override   // fix: was missing @Override on toString
        public String toString() {
            return "Notification[" + value + " " + kind + "]";
        }
    }

    @Override
    public Observable<InstanceNotification<T>> call(final Instance<T> instance) {
        // Emit OnAdd right away, then turn the lifecycle's terminal event (completion
        // OR error, via materialize()) into exactly one OnRemove notification.
        return Observable
                .just(new InstanceNotification<T>(instance, Kind.OnAdd))
                .concatWith(instance.getLifecycle().materialize().map(new Func1<Notification<Void>, InstanceNotification<T>>() {
                    @Override
                    public InstanceNotification<T> call(Notification<Void> t1) {
                        return new InstanceNotification<T>(instance, Kind.OnRemove);
                    }
                }));
    }
}
| 6,192 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/InstanceManager.java | package netflix.ocelli;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import rx.Observable;
import rx.Subscriber;
import rx.subjects.PublishSubject;
/**
* InstanceSubject can be used as a basic bridge from an add/remove host membership
* paradigm to Ocelli's internal Instance with lifecycle representation of entity
* membership in the load balancer.
*
* @see LoadBalancer
*
* @author elandau
*/
public class InstanceManager<T> extends Observable<Instance<T>> {
    // Broadcasts instances added AFTER a subscriber arrived.
    private final PublishSubject<Instance<T>> subject;
    // Current membership, keyed by the raw entity; used to replay existing
    // instances to late subscribers.
    private final ConcurrentMap<T, CloseableInstance<T>> instances;

    public static <T> InstanceManager<T> create() {
        return new InstanceManager<T>();
    }

    public InstanceManager() {
        this(PublishSubject.<Instance<T>>create(), new ConcurrentHashMap<T, CloseableInstance<T>>());
    }

    private InstanceManager(final PublishSubject<Instance<T>> subject, final ConcurrentMap<T, CloseableInstance<T>> instances) {
        super(new OnSubscribe<Instance<T>>() {
            @Override
            public void call(Subscriber<? super Instance<T>> s) {
                // TODO: This is a very naive implementation that may have race conditions
                // whereby instances may be dropped
                // (an instance added between the snapshot copy below and the concat
                // of 'subject' would be missed by this subscriber).
                Observable
                    .from(new ArrayList<Instance<T>>(instances.values()))
                    .concatWith(subject).subscribe(s);
            }
        });
        this.subject = subject;
        this.instances = instances;
    }
    /**
     * Add an entity to the source, which feeds into a load balancer.
     * Idempotent: re-adding an existing entity returns the already-registered
     * instance without emitting a duplicate.
     * @param t
     * @return the (possibly pre-existing) instance registered for {@code t}
     */
    public CloseableInstance<T> add(T t) {
        CloseableInstance<T> member = CloseableInstance.from(t);
        CloseableInstance<T> existing = instances.putIfAbsent(t, member);
        if (null == existing) {
            subject.onNext(member);
            return member;
        }
        return existing;
    }
    /**
     * Remove an entity from the source. If the entity exists it's lifecycle will
     * onComplete.
     *
     * @param t
     * @return the removed instance, or null if {@code t} was not registered
     */
    public CloseableInstance<T> remove(T t) {
        CloseableInstance<T> member = instances.remove(t);
        if (member != null) {
            member.close();
        }
        return member;
    }
}
| 6,193 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/AbstractLoadBalancerEvent.java | package netflix.ocelli;
@SuppressWarnings("rawtypes")
public class AbstractLoadBalancerEvent <T extends Enum> implements LoadBalancerEvent<T> {
protected final T name;
protected final boolean isTimed;
protected final boolean isError;
protected AbstractLoadBalancerEvent(T name, boolean isTimed, boolean isError) {
this.isTimed = isTimed;
this.name = name;
this.isError = isError;
}
@Override
public T getType() {
return name;
}
@Override
public boolean isTimed() {
return isTimed;
}
@Override
public boolean isError() {
return isError;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof AbstractLoadBalancerEvent)) {
return false;
}
AbstractLoadBalancerEvent that = (AbstractLoadBalancerEvent) o;
if (isError != that.isError) {
return false;
}
if (isTimed != that.isTimed) {
return false;
}
if (name != that.name) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + (isTimed ? 1 : 0);
result = 31 * result + (isError ? 1 : 0);
return result;
}
} | 6,194 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/CloseableInstance.java | package netflix.ocelli;
import rx.Observable;
import rx.functions.Func1;
import rx.subjects.BehaviorSubject;
/**
* An Instance that can be manually closed to indicate it is no longer
* in existence and should be removed from the connection pool.
*
* @author elandau
*
* @param <T>
*/
/**
 * An Instance that can be manually closed to indicate it is no longer in existence
 * and should be removed from the connection pool. Closing completes the lifecycle
 * Observable returned by {@link #getLifecycle()}.
 */
public class CloseableInstance<T> extends Instance<T> {
    public static <T> CloseableInstance<T> from(T value) {
        return from(value, BehaviorSubject.<Void>create());
    }

    public static <T> CloseableInstance<T> from(final T value, final BehaviorSubject<Void> lifecycle) {
        return new CloseableInstance<T>(value, lifecycle);
    }

    /** Func1 adapter wrapping raw values into closeable instances. */
    public static <T> Func1<T, CloseableInstance<T>> toMember() {
        return new Func1<T, CloseableInstance<T>>() {
            @Override
            public CloseableInstance<T> call(T t) {
                return from(t);
            }
        };
    }

    // fix: fields were mutable; they are set once in the constructor, so make them final.
    private final T value;
    private final BehaviorSubject<Void> lifecycle;

    public CloseableInstance(T value, BehaviorSubject<Void> lifecycle) {
        this.value = value;
        this.lifecycle = lifecycle;
    }

    @Override   // fix: was missing @Override on toString
    public String toString() {
        return "CloseableInstance[" + getValue() + "]";
    }

    /**
     * onComplete the instance's lifecycle Observable<Void>
     */
    public void close() {
        lifecycle.onCompleted();
    }

    @Override
    public Observable<Void> getLifecycle() {
        return lifecycle;
    }

    @Override
    public T getValue() {
        return value;
    }
}
| 6,195 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/InstanceEventListener.java | package netflix.ocelli;
import java.util.concurrent.TimeUnit;
/**
 * Convenience base listener that dispatches raw instance events to overridable
 * success/failure hooks. Event types other than the two handled here are ignored,
 * matching the original switch with no default branch.
 */
public abstract class InstanceEventListener implements LoadBalancerEventListener<InstanceEvent<?>> {
    @Override
    public void onEvent(InstanceEvent<?> event, long duration, TimeUnit timeUnit, Throwable throwable, Object value) {
        final InstanceEvent.EventType type = (InstanceEvent.EventType) event.getType();
        if (type == InstanceEvent.EventType.ExecutionSuccess) {
            onExecutionSuccess(duration, timeUnit);
        } else if (type == InstanceEvent.EventType.ExecutionFailed) {
            onExecutionFailed(duration, timeUnit, throwable);
        }
    }

    protected void onExecutionFailed(long duration, TimeUnit timeUnit, Throwable throwable) {
        // No Op
    }

    protected void onExecutionSuccess(long duration, TimeUnit timeUnit) {
        // No Op
    }

    @Override
    public void onCompleted() {
        // No Op
    }

    @Override
    public void onSubscribe() {
        // No Op
    }
}
| 6,196 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/LoadBalancer.java | package netflix.ocelli;
import netflix.ocelli.InstanceQuarantiner.IncarnationFactory;
import netflix.ocelli.loadbalancer.RoundRobinLoadBalancer;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Observable.Transformer;
import rx.Scheduler;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.schedulers.Schedulers;
import rx.subscriptions.Subscriptions;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
/**
* The LoadBalancer tracks lifecycle of entities and selects the next best entity based on plugable
* algorithms such as round robin, choice of two, random, weighted, weighted random, etc.
*
* The LoadBalancer provides two usage modes, LoadBalancer#next and LoadBalancer#toObservable.
* LoadBalancer#next selects the next best entity from the list of known entities based on the
* load balancing algorithm. LoadBalancer#toObservable() creates an Observable that whenever
* subscribed to will emit a single next best entity. Use toObservable() to compose with
* Rx retry operators.
*
* Use one of the fromXXX methods to begin setting up a load balancer builder. The builder begins
* with a source of entities that can be augmented with different topologies and quarantine strategies.
* Finally the load balancer is created by calling build() with the desired algorithm.
*
* @author elandau
*
* @param <T>
*/
public class LoadBalancer<T> {
    /** Fluent builder that composes the instance source before the LB is created. */
    public static class Builder<T> {
        private final Observable<Instance<T>> source;

        Builder(Observable<Instance<T>> source) {
            this.source = source;
        }

        /**
         * Topology to limit the number of active instances from the previously provided source. A
         * topology will track lifecycle of all source provided entities but only send a subset of these
         * entities to the load balancer.
         */
        public Builder<T> withTopology(Transformer<Instance<T>, Instance<T>> topology) {
            return new Builder<T>(source.compose(topology));
        }

        /**
         * The quarantiner creates a new incarnation of each entity while letting the entity manage its
         * own lifecycle that onCompletes whenever the entity fails. A separate lifecycle
         * (Observable<Void>) is tracked for each incarnation with all incarnations subject to the
         * original entity's membership lifecycle. Determination of failure is deferred to the entity.
         */
        public Builder<T> withQuarantiner(IncarnationFactory<T> factory, DelayStrategy backoffStrategy, Scheduler scheduler) {
            return new Builder<T>(source.flatMap(InstanceQuarantiner.create(factory, backoffStrategy, scheduler)));
        }

        /**
         * Same as {@link #withQuarantiner(IncarnationFactory, DelayStrategy, Scheduler)} using the
         * io() scheduler for re-activation delays.
         */
        public Builder<T> withQuarantiner(IncarnationFactory<T> factory, DelayStrategy backoffStrategy) {
            return new Builder<T>(source.flatMap(InstanceQuarantiner.create(factory, backoffStrategy, Schedulers.io())));
        }

        /**
         * Convert the client from one type to another. Note that topology or failure detection will
         * still occur on the previous type.
         */
        public <S> Builder<S> convertTo(Func1<Instance<T>, Instance<S>> converter) {
            return new Builder<S>(source.map(converter));
        }

        /** Construct the default LoadBalancer using the round-robin strategy. */
        public LoadBalancer<T> buildDefault() {
            return new LoadBalancer<T>(source.compose(InstanceCollector.<T>create()), RoundRobinLoadBalancer.<T>create());
        }

        /** Create a load balancer using the given strategy (RoundRobin, ChoiceOfTwo, ...). */
        public LoadBalancer<T> build(LoadBalancerStrategy<T> strategy) {
            return new LoadBalancer<T>(source.compose(InstanceCollector.<T>create()), strategy);
        }

        /** Create a load balancer using the given strategy and a custom instance collector. */
        public LoadBalancer<T> build(LoadBalancerStrategy<T> strategy, InstanceCollector<T> instanceCollector) {
            return new LoadBalancer<T>(source.compose(instanceCollector), strategy);
        }
    }

    /**
     * Start the builder from a stream of Instance<T> where each emitted item represents an added
     * instance and the instance's Instance#getLifecycle() onCompletes when the instance is removed.
     *
     * The source can be managed manually via {@link InstanceManager} or may be tied directly to a hot
     * Observable from a host registry service such as Eureka.
     */
    public static <T> Builder<T> fromSource(Observable<Instance<T>> source) {
        return new Builder<T>(source);
    }

    /**
     * Construct a load balancer builder from a stream of client snapshots. Note that
     * T must implement hashCode() and equals() so that a proper delta may be determined
     * between successive snapshots.
     */
    public static <T> Builder<T> fromSnapshotSource(Observable<List<T>> source) {
        return new Builder<T>(source.compose(new SnapshotToInstance<T>()));
    }

    /** Construct a load balancer builder from a fixed list of clients. */
    public static <T> Builder<T> fromFixedSource(List<T> clients) {
        return fromSnapshotSource(Observable.just(clients));
    }

    // Sentinels marking the subscription state: not yet subscribed / permanently shut down.
    private final static Subscription IDLE_SUBSCRIPTION = Subscriptions.empty();
    private final static Subscription SHUTDOWN_SUBSCRIPTION = Subscriptions.empty();

    private final AtomicReference<Subscription> subscription;
    // Latest snapshot emitted by 'source'; null until the first emission and after shutdown().
    private volatile List<T> cache;
    private final AtomicBoolean isSubscribed = new AtomicBoolean(false);
    private final Observable<List<T>> source;
    private final LoadBalancerStrategy<T> algo;

    // Visible for testing only
    static <T> LoadBalancer<T> create(Observable<List<T>> source, LoadBalancerStrategy<T> algo) {
        return new LoadBalancer<T>(source, algo);
    }

    // Visible for testing only
    LoadBalancer(final Observable<List<T>> source, final LoadBalancerStrategy<T> algo) {
        this.source = source;
        this.subscription = new AtomicReference<Subscription>(IDLE_SUBSCRIPTION);
        this.algo = algo;
        // BUG FIX: the original contained "this.cache = cache;" here — a self-assignment
        // no-op (there is no constructor parameter named 'cache'). The field intentionally
        // starts as null and is populated by the first source emission in next().
    }

    /**
     * Observable that, on each subscription, emits a single next-best entity (or errors
     * with NoSuchElementException). Useful for composing with Rx retry operators.
     */
    public Observable<T> toObservable() {
        return Observable.create(new OnSubscribe<T>() {
            @Override
            public void call(Subscriber<? super T> s) {
                try {
                    s.onNext(next());
                    s.onCompleted();
                }
                catch (Exception e) {
                    s.onError(e);
                }
            }
        });
    }

    /**
     * Select the next best T from the source. Will auto-subscribe to the source on the first
     * call to next(). shutdown() must be called to unsubscribe() from the source once the load
     * balancer is no longer used.
     *
     * @throws NoSuchElementException if no snapshot has been received yet (or after shutdown)
     */
    public T next() throws NoSuchElementException {
        // Auto-subscribe exactly once.
        if (isSubscribed.compareAndSet(false, true)) {
            Subscription s = source.subscribe(new Action1<List<T>>() {
                @Override
                public void call(List<T> t1) {
                    cache = t1;
                }
            });
            // Prevent subscription after shutdown
            if (!subscription.compareAndSet(IDLE_SUBSCRIPTION, s)) {
                s.unsubscribe();
            }
        }
        // Read the volatile once so the null-check and choose() see the same snapshot.
        List<T> latest = cache;
        if (latest == null) {
            throw new NoSuchElementException();
        }
        return algo.choose(latest);
    }

    /**
     * Shut down the source subscription. This LoadBalancer may no longer be used
     * after shutdown is called.
     */
    public void shutdown() {
        Subscription s = subscription.getAndSet(SHUTDOWN_SUBSCRIPTION);
        s.unsubscribe();
        cache = null;
    }
}
| 6,197 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/Instances.java | package netflix.ocelli;
import rx.Observable;
import rx.functions.Func1;
/**
 * Static helpers for working with {@link Instance} objects.
 */
public abstract class Instances {
    /**
     * Lift a value-transforming function into one that transforms an Instance while
     * preserving the source instance's lifecycle. The conversion is applied eagerly,
     * exactly once per source instance.
     */
    public static <T, S> Func1<Instance<T>, Instance<S>> transform(final Func1<T, S> func) {
        return new Func1<Instance<T>, Instance<S>>() {
            @Override
            public Instance<S> call(final Instance<T> source) {
                final S converted = func.call(source.getValue());
                return new Instance<S>() {
                    @Override
                    public Observable<Void> getLifecycle() {
                        // Lifecycle is shared with the source: when it is removed, so is this view.
                        return source.getLifecycle();
                    }

                    @Override
                    public S getValue() {
                        return converted;
                    }

                    @Override
                    public String toString() {
                        return "Instance[" + source.getValue() + " -> " + getValue() + "]";
                    }
                };
            }
        };
    }
}
| 6,198 |
0 | Create_ds/ocelli/ocelli-core/src/main/java/netflix | Create_ds/ocelli/ocelli-core/src/main/java/netflix/ocelli/LoadBalancerStrategy.java | package netflix.ocelli;
import java.util.List;
/**
* Strategy that when given a list of candidates selects the single best one.
* @author elandau
*
* @param <T>
*/
public interface LoadBalancerStrategy<T> {
    /**
     * Select the single best candidate from the provided list.
     *
     * @param candidates currently active candidates to choose from
     * @return the chosen candidate
     */
    T choose(List<T> candidates);
}
| 6,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.